prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars)
---|---|---|
import os
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import sys
import tkinter as Tk
import tkinter.filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from scipy.optimize import curve_fit
from scipy.signal import butter, lfilter
import pandas as pd
import xlrd
from astropy.table import Table
import xlsxwriter
from tkinter import ttk
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*", category=UserWarning)
root = Tk.Tk()
#Initialize global variables for analysis
Delete=1; # Number of initial points to delete
Clipid=15e-3 # Concentration of the Lipid in the syringe [M]
Cdrug=100e-6 # Concentration of the Drug in the cell [M]
Vw=200e-6 # Volume of the Cell [L]
vL=(1398e-27)*(6.0221409e23) #Vesicles Volume
aL=(7e-19)*(6.0221409e23) #Vesicles Area
aD=(3e-19)*(6.0221409e23) #Drug Area
R=8.314459848; #J/(mol*Kelvin)
T=298.15; #Kelvin
F=96485.336521; #C/mol
eps0=8.85*1e-12; #Farads/meter
eps=78.4; #Relative permittivity of water (dimensionless)
SaltC=0.15; #moles/L Salt concentration
zdrug= 1; #Charge of drug
#Initializing GUI global variables
sheet = None
a = None
dh_drug = None
injv_drug = None
sheet2 = None
a2 = None
dh_control = None
injv_control = None
savename=''
CD = None
label5 = None
drug_charge_prev = None
canvas=None
canvas2=None
lipidselect=None
lipid_col=3
label4 = None
label3=None
#Reset GUI
def Reset1():
python = sys.executable
os.execl(python, python, * sys.argv)
#Initial GUI screen, select data directory/files
def Command1():
global CD
global label3
global label4
global label5
if label3 != None:
label3.destroy()
if savename == '': #Choosing directory for the first time
root.directory = Tk.filedialog.askdirectory()
CD=root.directory
label3=Tk.Label(root,text=CD)
label3.grid(row=0,column=1,sticky=Tk.W,columnspan=6)
label4=Tk.Label(root,text='Select ITC Experiment File - Drug')
label4.grid(row=1,column=1,sticky=Tk.W)
label5=Tk.Label(root,text='Select ITC Background File - Control (Optional)')
label5.grid(row=2,column=1,sticky=Tk.W)
else: #Reselecting directory
label4.destroy()
label5.destroy()
root.directory = Tk.filedialog.askdirectory()
CD=root.directory
label3=Tk.Label(root,text=CD)
label3.grid(row=0,column=1,sticky=Tk.W,columnspan=6)
label4=Tk.Label(root,text='Select ITC Experiment File - Drug')
label4.grid(row=1,column=1,sticky=Tk.W)
label5=Tk.Label(root,text='Select ITC Background File - Control (Optional)')
label5.grid(row=3,column=1,sticky=Tk.W)
#Choose drug file
def Drug1():
global a
global sheet
global savename
global dh_drug
global injv_drug
global label1001
global entry1001
global label1002
global entry1002
global label1003
global entry1003
global label1004
global entry1004
global label1005
global entry1005
global label1006
global entry1006
global label1007
global entry1007
global label1008
global entry1008
global label1009
global entry1009
global button3
global label5
global label4
global button99
global lipidselect
global label_lip_area
global label_lip_area_e
global label_lip_thickness
global label_lip_thickness_e
global button10
global lipid_col
#User can choose experiment drug excel file, which is then read
root.filename = Tk.filedialog.askopenfilename(initialdir = root.directory,title = "Select file",filetypes = (("XLS","*.xls"),("XLSX","*.xlsx"),("all files","*.*")))
df = pd.read_excel(root.filename,)
a=df.shape
wb = xlrd.open_workbook(root.filename)
sheet = wb.sheet_by_index(0)
label4.destroy()
label4=Tk.Label(root,text=root.filename)
label4.grid(row=1,column=1,sticky=Tk.W)
savename=root.filename
#User can select columns for the heat and injection volume from excel file
button3.destroy()
label5.destroy()
button99.destroy()
labeldh1=Tk.Label(root,text='Column for Heat (DH):')
labelinjv1=Tk.Label(root,text='Column for Injection Volume (INJV):')
entrydh1=Tk.Entry(root,width=5)
entryinjv1=Tk.Entry(root,width=5)
labeldh1.grid(row=1,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entrydh1.grid(row=1,column=4,sticky=Tk.W)
entrydh1.insert(Tk.END, '0')
dh_drug = int(entrydh1.get())
labelinjv1.grid(row=2,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entryinjv1.grid(row=2,column=4,sticky=Tk.W)
entryinjv1.insert(Tk.END, '1')
injv_drug = int(entryinjv1.get())
#Moving buttons and labels in GUI to make it look nice
button3=Tk.Button(text='Select Control Background File',fg='blue',command=Background1,width=25)
button3.grid(row=3,column=0,sticky=Tk.W)
label5=Tk.Label(root,text='Select ITC Background File - Control (Optional)')
label5.grid(row=3,column=1,sticky=Tk.W)
button99=Tk.Button(text='Run ITC Analysis',fg='black',command=testing,height=5,width=25)
button99.grid(row=4,column=0,sticky=Tk.W,columnspan=2,rowspan=5)
lipid_col = 5
label1001.grid(row=1,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1001.grid(row=1,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1002.grid(row=2,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
    entry1002.grid(row=2,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1003.grid(row=3,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1003.grid(row=3,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1004.grid(row=4,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1004.grid(row=4,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1007.grid(row=8,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1007.grid(row=8,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1008.grid(row=9,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1008.grid(row=9,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1009.grid(row=10,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1009.grid(row=10,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
button8.grid(row=0,column=8,sticky=Tk.E)
label10100.grid(row=5,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
labelarrow.grid(row=6,column=6,sticky=Tk.W,pady=(2,2), padx=(70,0), columnspan=2, rowspan=2)
label1005.grid(row=6,column=6,sticky=Tk.W,pady=(2,2), padx=(95,0), columnspan=2)
entry1005.grid(row=6,column=8,sticky=Tk.W,pady=(2,2), padx=(0,5))
label1006.grid(row=7,column=6,sticky=Tk.W,pady=(2,2), padx=(95,0), columnspan=2)
entry1006.grid(row=7,column=8,sticky=Tk.W,pady=(2,2), padx=(0,5))
label_lip_area.grid(row=6,column=5,sticky=Tk.W,pady=(2,2), padx=(90,0))
label_lip_area_e.grid(row=6,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label_lip_thickness.grid(row=7,column=5,sticky=Tk.W,pady=(2,2), padx=(90,0))
label_lip_thickness_e.grid(row=7,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
lipidselect.grid(column = 6, row = 5, sticky= Tk.W)
button10.grid(row=5,column=7,sticky=Tk.W,pady=(2,2), padx=(5,0)) #15
#Choose background file
def Background1():
global sheet2
global a2
global dh_control
global injv_control
global button99
global label5
label5.destroy()
    #User can choose the control background excel file, which is then read
root.filename = Tk.filedialog.askopenfilename(initialdir = root.directory,title = "Select file",filetypes = (("XLS","*.xls"),("XLSX","*.xlsx"),("all files","*.*")))
df2 = pd.read_excel(root.filename,)
a2=df2.shape
wb2 = xlrd.open_workbook(root.filename)
sheet2 = wb2.sheet_by_index(0)
label5=Tk.Label(root,text=root.filename)
label5.grid(row=3,column=1,sticky=Tk.W)
#User can select columns for the heat and injection volume from excel file
labeldh2=Tk.Label(root,text='Column for Heat (DH):')
labelinjv2=Tk.Label(root,text='Column for Injection Volume (INJV):')
entrydh2=Tk.Entry(root,width=5)
entryinjv2=Tk.Entry(root,width=5)
labeldh2.grid(row=3,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entrydh2.grid(row=3,column=4,sticky=Tk.W)
entrydh2.insert(Tk.END, '0')
dh_control = int(entrydh2.get())
labelinjv2.grid(row=4,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entryinjv2.grid(row=4,column=4,sticky=Tk.W)
entryinjv2.insert(Tk.END, '1')
injv_control = int(entryinjv2.get())
button99.destroy()
button99=Tk.Button(text='Run ITC Analysis',fg='black',command=testing,height=5,width=25)
button99.grid(row=5,column=0,sticky=Tk.W,columnspan=2,rowspan=5)
#Run ITC analysis
def testing():
global sheet
global a
global sheet2
global a2
global savename
global CD
global injv_drug
global injv_control
global dh_drug
global dh_control
global drug_charge_prev
global canvas
global canvas2
savename = savename.split('.')[0]
#Get parameters from GUI
Delete=int(entry1001.get())
Clipid=float(entry1002.get())
Cdrug=float(entry1003.get())
Vw=float(entry1004.get())
vL_str = entry1005.get()
if '*' in vL_str:
vL_arr = vL_str.split('*')
if '(' in vL_str:
vL =float(vL_arr[0].strip("()"))*float(vL_arr[1].strip("()"))
else:
vL =float(vL_arr[0])*float(vL_arr[1])
else:
if '(' in vL_str:
vL = float(vL_str.strip("()"))
else:
vL = float(vL_str)
aL_str = entry1006.get()
if '*' in aL_str:
aL_arr = aL_str.split('*')
if '(' in aL_str:
aL =float(aL_arr[0].strip("()"))*float(aL_arr[1].strip("()"))
else:
aL =float(aL_arr[0])*float(aL_arr[1])
else:
if '(' in aL_str:
aL = float(aL_str.strip("()"))
else:
aL = float(aL_str)
aD_str = entry1007.get()
if '*' in aD_str:
aD_arr = aD_str.split('*')
if '(' in aD_str:
aD =float(aD_arr[0].strip("()"))*float(aD_arr[1].strip("()"))
else:
aD =float(aD_arr[0])*float(aD_arr[1])
else:
if '(' in aD_str:
aD = float(aD_str.strip("()"))
else:
aD = float(aD_str)
R=8.314459848; #J/(mol*Kelvin)
T=298.15; #Kelvin
F=96485.336521; #C/mol
eps0=8.85*1e-12; #Farads/meter
    eps=78.4; #Relative permittivity of water (dimensionless)
SaltC=float(entry1008.get())
zdrug=int(entry1009.get())
#Define fit functions used for Kp and Phi
def func1(x, dH, Kp):
return (Vw*Cdrug)*Vw*Kp*Vinj_add*Clipid*aL*dH*(1e6)/np.power((Vw+(x-0.5)*Vinj_add*(1+Clipid*aL*Kp)),2)
def func2(X, dH, Kp):
x,Phi = X
return 1e6*((dH*(Vw*Cdrug)*x*const1*Kp*np.exp(-beta*Phi)/(Vw+x*Vinj+x*const1*Kp*np.exp(-beta*Phi)))-(dH*(Vw*Cdrug)*(x-1)*const1*Kp*np.exp(-beta*Phi)/(Vw+(x-1)*Vinj+(x-1)*const1*Kp*np.exp(-beta*Phi))))
#Getting values for heats and injection volumes from excel file
DH=[]
Vinj=[]
Inj=[]
for i in range(a[0]-1-Delete):
DH_add=sheet.cell_value(i+1+Delete, dh_drug)
DH.append(DH_add)
Vinj_add=sheet.cell_value(i+1+Delete, injv_drug)
Vinj_add=Vinj_add/1e6
Vinj.append(Vinj_add)
Inj.append(i+1)
DH=np.array(DH)
Vinj=np.array(Vinj)
Inj=np.array(Inj)
if sheet2 != None:
DH2=[]
for i in range(a2[0]-1-Delete):
            DH_add2=sheet2.cell_value(i+1+Delete, dh_control)
DH2.append(DH_add2)
DH3=np.array(DH2)
if DH.shape[0]>DH3.shape[0]:
for i in range(DH.shape[0]-DH3.shape[0]):
DH2.append(0)
DH2=np.array(DH2)
if DH.shape[0]<DH3.shape[0]:
DH2=np.array(DH2)
DH2=DH2[range(DH.shape[0])]
DH=DH-DH2
xdata = Inj
ydata = DH
#Clears previous graphs and output text if present
if drug_charge_prev != None:
for ix in range(4):
labempty = Tk.Label(root,text=' ' * 280)
labempty.grid(row=14+ix,column=0,columnspan=4)
canvas.get_tk_widget().destroy()
if drug_charge_prev != 0 and canvas2 != None:
canvas2.get_tk_widget().destroy()
#Display Kp graph
f = Figure(figsize=(5, 4), dpi=100)
aa = f.add_subplot(111)
aa.plot(xdata,ydata,'.')
aa.set_xlabel('Injection Number')
aa.set_xticks(np.arange(0, np.max(xdata)+1, step=2))
aa.set_ylabel('\u03BC cal')
canvas = FigureCanvasTkAgg(f, master=root)
canvas.draw()
if zdrug == 0:
canvas.get_tk_widget().grid(row=12,column=1, columnspan=3, pady=10, padx=10)
else:
canvas.get_tk_widget().grid(row=12,column=0, columnspan=2, pady=10, padx=10)
#Fit for Kp
Kp=1
dH=ydata[0]*T
dHeat=(0,ydata[0]*np.inf)
for i in range(1000):
popt, pcov = curve_fit(func1, xdata, ydata, p0=[dH,Kp], bounds=([np.min(dHeat), 1e-10], [np.max(dHeat), 10e10]))
residuals2 = ydata- func1(xdata, *popt)
ss_res2 = np.sum(residuals2**2)
ss_tot2 = np.sum((ydata-np.mean(ydata))**2)
r_squared2 = 1 - (ss_res2 / ss_tot2)
dH2=popt[0]
Kp2=popt[1]
dG2=-1.9858775*298.15*np.log(Kp2*(aL/vL))
TdS2=dH2-dG2
if np.abs(Kp-Kp2)+np.abs(dH-dH2)<=0:
break
else:
Kp=Kp2
dH=dH2
Sample1=[]
Sample1.append([Kp2*(aL/vL),dH2,dG2,TdS2,r_squared2,ss_res2])
aa.plot(xdata, func1(xdata, *popt))
f.savefig(CD+'/figure1_Kp.png')
xdata_np = np.array(xdata)
fit_yvals = np.array(func1(xdata, *popt))
fit_table2 = []
for fiti in range(0,len(xdata_np)):
xx = xdata[fiti]
yy = fit_yvals[fiti]
minilst = [xx,yy]
fit_table2.append(minilst)
fit_table2 = np.array(fit_table2)
fit_df = pd.DataFrame(data=fit_table2, index=None, columns=["Injection Number", "Heat (ucal)"])
writer = | pd.ExcelWriter((savename+'fit.xlsx'), engine='xlsxwriter') | pandas.ExcelWriter |
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
        pd.testing.assert_series_equal(data, expected, check_index_type=check_index_type)
else:
        pd.testing.assert_frame_equal(data, expected, check_index_type=check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, | pd.Series([1, 2, 3]) | pandas.Series |
from bitstring import BitArray
import pandas as pd
from collections import *
import pickle
import multiprocessing
from multiprocessing.dummy import Pool
import random
from MIN_HASH_class import MIN_HASH
from HashTable_class import HashTable
from ex1_functions import *
class LSH:
def __init__(self, minhash_len, num_band):
        assert minhash_len % num_band == 0, "the chosen number of bands does not satisfy: minhash_len % num_band == 0"
self.minhash_len = minhash_len
self.num_band = num_band
self.hash_tables = list()
self.minhash_dict = dict()
for i in range(self.num_band):
self.hash_tables.append(HashTable())
def addMinHash(self, minhash):
self.minhash_dict[minhash.label] = minhash
self._create_store_band(minhash.hashvalues, minhash.label)
def _create_store_band(self, vec, label):
row_per_band = self.minhash_len // self.num_band
subVec = []
for i in range(0,self.minhash_len, row_per_band):
subVec.append(vec[i:i+row_per_band])
for band, table in zip(subVec, self.hash_tables):
table.setitem(band, label)
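        # For example (illustration only): with minhash_len=128 and num_band=16, each band
        # holds 8 consecutive hash values and every signature lands in 16 tables, one per band.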
def _create_band(self, vec):
row_per_band = self.minhash_len // self.num_band
subVec = []
for i in range(0,self.minhash_len, row_per_band):
subVec.append(vec[i:i+row_per_band])
return subVec
def query_lsh(self, minhash_query):
subVec_query = self._create_band(minhash_query.hashvalues)
match = set()
similarities = []
for table, band in zip(self.hash_tables, subVec_query):
key = 0
for i in band:
                key ^= fibonacci_hash_float(i)  # combine all band values into a single bucket key
if(table.hash_table.get(key, "NA") != "NA"):
match.update(tuple(table.hash_table.get(key)))
for m in match:
similarities.append((self.minhash_dict[m].jaccard(minhash_query), m.name))
        similarities = sorted(similarities, reverse=True)
return similarities
def info(self):
print("Numero di tabelle: " + str(self.num_tables))
print("Elementi per tabella: " + str(len(self.hash_tables[0].hash_table)))
#=================================================================================================
def query(bands, minhashes, query_minhashes, num_perm = 128):
for b in bands:
out = []
lsh = LSH(minhash_len=num_perm, num_band=b)
print("==========================\nQuery with " + str(b) + " number of bands:\n")
for i in minhashes:
lsh.addMinHash(i)
for query in query_minhashes:
dfout = | pd.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Restructure Hazard Function Calculations
# This notebook builds on Marvin's work; I change the Kaplan-Meier estimators to reflect days rather than years. Given that the lines are a bit difficult to read, I decided to report the values in the form of half-life estimates.
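# As a minimal sketch of the idea (my own illustration, not code from the analysis below),
# the half-life is simply the median survival time of a Kaplan-Meier fit over delays in days,
# where `delay_in_days` and `was_published` are hypothetical input arrays:
#
# ```python
# kmf = KaplanMeierFitter()
# kmf.fit(durations=delay_in_days, event_observed=was_published)
# half_life_days = kmf.median_survival_time_
# half_life_ci = median_survival_times(kmf.confidence_interval_)
# ```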
# +
from datetime import timedelta, date
from pathlib import Path
from lifelines import KaplanMeierFitter
from lifelines.utils import median_survival_times
import numpy as np
import pandas as pd
import plotnine as p9
import tqdm
from mizani.breaks import date_breaks
from mizani.formatters import timedelta_format
# -
# # Load the Data
published_dates = pd.read_csv(
"../publication_delay_experiment/output/biorxiv_published_dates.tsv", sep="\t"
).assign(
preprint_date=lambda x: pd.to_datetime(x.preprint_date.tolist()),
published_date=lambda x: pd.to_datetime(
x.published_date.apply(lambda y: y[0 : y.index(":")] if ":" in y else y)
),
)
print(published_dates.shape)
published_dates.head()
biorxiv_journal_df = (
pd.read_csv("../journal_tracker/output/mapped_published_doi.tsv", sep="\t")
.groupby("preprint_doi")
.agg(
{
"document": "first",
"category": "first",
"preprint_doi": "count",
"published_doi": "first",
"pmcid": "first",
"pmcoa": "first",
"posted_date": "first",
}
)
.rename(index=str, columns={"preprint_doi": "version_count"})
.reset_index()
)
print(biorxiv_journal_df.shape)
biorxiv_journal_df.head()
preprints_w_published_dates = (
biorxiv_journal_df.sort_values("document")
.merge(
published_dates[["biorxiv_doi", "published_date"]].rename(
index=str, columns={"biorxiv_doi": "preprint_doi"}
),
on="preprint_doi",
how="left",
)
.assign(published_date=lambda x: x.published_date.fillna(date.today()))
.assign(
time_to_published=lambda x: | pd.to_datetime(x.published_date) | pandas.to_datetime |
import traceback
import logging
from pymongo import MongoClient
import pandas as pd
from pandas.io.json import json_normalize
import libs.pandasLib as pl
from bson.objectid import ObjectId
def getPayloadsDfFromDB(collection, entityExtractionKeys):
'''
    :param collection: MongoDB collection client
    :param entityExtractionKeys: dict mapping platform names to the attributes to access
    :return: DataFrame with 'payload' and 'id' columns
'''
df = | pd.DataFrame(columns=['payload', 'id']) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
from imblearn.over_sampling import SMOTE
def Imputer(data, kind = "mean"):
df = data.copy()
for feature in df.columns:
if df[feature].dtype == "float":
if kind == "mean":
df[feature] = df[feature].fillna(df[feature].mean())
elif kind == "median":
df[feature] = df[feature].fillna(df[feature].median())
elif kind == "mode":
df[feature] = df[feature].fillna(df[feature].mode()[0])
elif df[feature].dtype == "object":
df[feature] = df[feature].fillna(df[feature].mode()[0])
return df
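# For example, Imputer(df, kind="median") fills numeric NaNs with each column's median
# and object-dtype NaNs with each column's mode.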
def cimputer(fname: str,
kind: str = "mean",
dateCol: str = None,
dataDir: str = "data") -> None:
if not os.path.isdir(dataDir):
os.mkdir(dataDir)
if dateCol != "":
df = pd.read_csv(fname, parse_dates=[dateCol])
else:
df = pd.read_csv(fname)
dfImp = Imputer(df, kind)
if fname.find(f"{dataDir}/") != -1:
dfImp.to_csv(f"./{fname[:-4]}_{kind}_imputed.csv", index=False)
else:
dfImp.to_csv(f"./{dataDir}/{fname[:-4]}_{kind}_imputed.csv", index=False)
def Resample(data, replace, n_samples):
indices = data.index
random_sampled_indices = np.random.choice(indices,
size=n_samples,
replace=replace)
return data.loc[random_sampled_indices]
def cresample(fname: str,
target: str,
neg_value: str,
pos_value: str,
kind: str,
dateCol: str = None,
random_state = 123,
dataDir: str = "data") -> None:
if not os.path.isdir(dataDir):
os.mkdir(dataDir)
if kind == "smote":
df = pd.read_csv(fname, header=None)
else:
if dateCol != "":
df = pd.read_csv(fname, parse_dates=[dateCol])
else:
df = pd.read_csv(fname)
negClass = df[df[target] == neg_value]
posClass = df[df[target] == pos_value]
df = df.drop("Date", axis=1)
if kind == "oversample":
posOverSampled = Resample(data=posClass, replace=True, n_samples=len(negClass))
overSampled = pd.concat([negClass, posOverSampled])
if fname.find(f"{dataDir}/") != -1:
overSampled.to_csv(f"./{fname[:-4]}_oversampled.csv", index=False)
else:
overSampled.to_csv(f"./{dataDir}/{fname[:-4]}_oversampled.csv", index=False)
if kind == "undersample":
negUnderSampled = Resample(data=negClass, replace=False, n_samples=len(posClass))
underSampled = pd.concat([negUnderSampled, posClass])
if fname.find(f"{dataDir}/") != -1:
underSampled.to_csv(f"./{fname[:-4]}_undersampled.csv", index=False)
else:
underSampled.to_csv(f"./{dataDir}/{fname[:-4]}_undersampled.csv", index=False)
if kind == "smote":
so = SMOTE()
features, targets = so.fit_resample(df.iloc[:, :-1], df.iloc[:,-1])
smoteSampled = pd.concat([pd.DataFrame(features), pd.DataFrame(targets)], axis=1)
if fname.find(f"{dataDir}/") != -1:
smoteSampled.to_csv(f"./{fname[:-4]}_smotesampled.csv", index=False)
else:
smoteSampled.to_csv(f"./{dataDir}/{fname[:-4]}_smotesampled.csv", index=False)
def cresamplenum(fname: str,
target: str,
neg_value: int,
pos_value: int,
kind: str = "oversample",
dateCol: str = None,
random_state = 123,
dataDir: str = "data") -> None:
if kind == "smote":
df = pd.read_csv(fname, skiprows=1)
else:
if dateCol:
df = pd.read_csv(fname, parse_dates=[dateCol])
df = df.drop(dateCol, axis=1)
else:
df = pd.read_csv(fname)
negClass = df[df[target] == neg_value]
posClass = df[df[target] == pos_value]
if kind == "oversample":
posOverSampled = Resample(data=posClass, replace=True, n_samples=len(negClass))
overSampled = pd.concat([negClass, posOverSampled])
if fname.find(f"{dataDir}/") != -1:
overSampled.to_csv(f"./{fname[:-4]}_oversampled.csv", index=False)
else:
overSampled.to_csv(f"./{dataDir}/{fname[:-4]}_oversampled.csv", index=False)
if kind == "undersample":
negUnderSampled = Resample(data=negClass, replace=False, n_samples=len(posClass))
underSampled = pd.concat([negUnderSampled, posClass])
if fname.find(f"{dataDir}/") != -1:
underSampled.to_csv(f"./{fname[:-4]}_undersampled.csv", index=False)
else:
underSampled.to_csv(f"./{dataDir}/{fname[:-4]}_undersampled.csv", index=False)
if kind == "smote":
so = SMOTE()
features, targets = so.fit_resample(df.iloc[:, :-1], df.iloc[:,-1])
smoteSampled = pd.concat([pd.DataFrame(features), | pd.DataFrame(targets) | pandas.DataFrame |
# Author: <NAME>
# Date: 26 November 2016
# Python version: 3.5
# Updated June 2018 by <NAME> (KTH dESA)
# Modified grid algorithm and population calibration to improve computational speed
import logging
import pandas as pd
from math import pi, exp, log, sqrt, ceil
# from pyproj import Proj
import numpy as np
from collections import defaultdict
import os
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# general
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in metres/kilometres
SET_Y = 'Y' # Coordinate in metres/kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartCalibrated' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopFuture' # Project future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'VIIRS' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID'  # the unique tag for each hydropower site, to avoid over-utilisation
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_SOLAR_RESTRICTION = 'SolarRestriction'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_HH = 'EnergyPerHH'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'ElecFuture' # If the site has the potential to be 'easily' electrified in future
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_NEW_CONNECTIONS_PROD = 'New_Connections_Prod'  # Number of new people with electricity connections, plus the corresponding productive uses
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_LCOE_MG_HYBRID = 'MG_Hybrid'
SET_MIN_OFFGRID = 'MinimumOffgrid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'MinimumTechLCOE' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'Pop2016' # The actual population in the base year
SPE_URBAN = 'UrbanRatio2016' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'Pop2030'
SPE_URBAN_FUTURE = 'UrbanRatio2030'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff'  # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth'  # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh' # Same, with a high forecast var
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost' # grid capacity investments costs from TEMBA USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
class Technology:
"""
Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
input parameters.
"""
start_year = 2016
end_year = 2030
discount_rate = 0.08
grid_cell_area = 1 # in km2, normally 1km2
mv_line_cost = 9000 # USD/km
lv_line_cost = 5000 # USD/km
mv_line_capacity = 50 # kW/line
lv_line_capacity = 10 # kW/line
lv_line_max_length = 30 # km
hv_line_cost = 53000 # USD/km
mv_line_max_length = 50 # km
hv_lv_transformer_cost = 5000 # USD/unit
mv_increase_rate = 0.1 # percentage
def __init__(self,
tech_life, # in years
base_to_peak_load_ratio,
distribution_losses=0, # percentage
connection_cost_per_hh=0, # USD/hh
om_costs=0.0, # OM costs as percentage of capital costs
capital_cost=0, # USD/kW
capacity_factor=1.0, # percentage
efficiency=1.0, # percentage
diesel_price=0.0, # USD/litre
grid_price=0.0, # USD/kWh for grid electricity
standalone=False,
mg_pv=False,
mg_wind=False,
mg_diesel=False,
mg_hydro=False,
grid_capacity_investment=0.0, # USD/kW for on-grid capacity investments (excluding grid itself)
diesel_truck_consumption=0, # litres/hour
diesel_truck_volume=0, # litres
om_of_td_lines=0): # percentage
self.distribution_losses = distribution_losses
self.connection_cost_per_hh = connection_cost_per_hh
self.base_to_peak_load_ratio = base_to_peak_load_ratio
self.tech_life = tech_life
self.om_costs = om_costs
self.capital_cost = capital_cost
self.capacity_factor = capacity_factor
self.efficiency = efficiency
self.diesel_price = diesel_price
self.grid_price = grid_price
self.standalone = standalone
self.mg_pv = mg_pv
self.mg_wind = mg_wind
self.mg_diesel = mg_diesel
self.mg_hydro = mg_hydro
self.grid_capacity_investment = grid_capacity_investment
self.diesel_truck_consumption = diesel_truck_consumption
self.diesel_truck_volume = diesel_truck_volume
self.om_of_td_lines = om_of_td_lines
def pv_diesel_hybrid(self,
energy_per_hh, # kWh/household/year as defined
max_ghi, # highest annual GHI value encountered in the GIS data
max_travel_hours, # highest value for travel hours encountered in the GIS data
diesel_no=1, # 50, # number of diesel generators simulated
pv_no=1, #70, # number of PV panel sizes simulated
n_chg=0.92, # charge efficiency of battery
n_dis=0.92, # discharge efficiency of battery
lpsp=0.05, # maximum loss of load allowed over the year, in share of kWh
                         battery_cost=150, # battery capital cost, USD/kWh of storage capacity
pv_cost=2490, # PV panel capital cost, USD/kW peak power
diesel_cost=550, # diesel generator capital cost, USD/kW rated power
pv_life=20, # PV panel expected lifetime, years
diesel_life=15, # diesel generator expected lifetime, years
pv_om=0.015, # annual OM cost of PV panels
diesel_om=0.1, # annual OM cost of diesel generator
k_t=0.005): # temperature factor of PV panels
        ghi = pd.read_csv('Supplementary_files/GHI_hourly.csv', usecols=[4], sep=';', skiprows=21).values
        # hourly GHI values downloaded from SoDa for one location in the country
        temp = pd.read_csv('Supplementary_files/Temperature_hourly.csv', usecols=[4], sep=';', skiprows=21).values
        # hourly temperature values downloaded from SoDa for one location in the country
hour_numbers = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23) * 365
LHV_DIESEL = 9.9445485
dod_max = 0.8 # maximum depth of discharge of battery
# the values below define the load curve for the five tiers. The values reflect the share of the daily demand
# expected in each hour of the day (sum of all values for one tier = 1)
tier5_load_curve = np.array([0.021008403, 0.021008403, 0.021008403, 0.021008403, 0.027310924, 0.037815126,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.046218487, 0.050420168,
0.067226891, 0.084033613, 0.073529412, 0.052521008, 0.033613445, 0.023109244])
tier4_load_curve = np.array([0.017167382, 0.017167382, 0.017167382, 0.017167382, 0.025751073, 0.038626609,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.0472103, 0.051502146,
0.068669528, 0.08583691, 0.075107296, 0.053648069, 0.034334764, 0.021459227])
tier3_load_curve = np.array([0.013297872, 0.013297872, 0.013297872, 0.013297872, 0.019060284, 0.034574468,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.048758865, 0.053191489,
0.070921986, 0.088652482, 0.077570922, 0.055407801, 0.035460993, 0.019946809])
tier2_load_curve = np.array([0.010224949, 0.010224949, 0.010224949, 0.010224949, 0.019427403, 0.034764826,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.04601227, 0.056237219,
0.081799591, 0.102249489, 0.089468303, 0.06390593, 0.038343558, 0.017893661])
tier1_load_curve = np.array([0, 0, 0, 0, 0.012578616, 0.031446541, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.044025157, 0.062893082, 0.100628931, 0.125786164, 0.110062893,
0.078616352, 0.044025157, 0.012578616])
if energy_per_hh < 75:
load_curve = tier1_load_curve * energy_per_hh / 365
elif energy_per_hh < 365:
load_curve = tier2_load_curve * energy_per_hh / 365
elif energy_per_hh < 1241:
load_curve = tier3_load_curve * energy_per_hh / 365
elif energy_per_hh < 2993:
load_curve = tier4_load_curve * energy_per_hh / 365
else:
load_curve = tier5_load_curve * energy_per_hh / 365
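        # Worked example (illustration only): energy_per_hh = 500 kWh/year falls in the
        # tier 3 branch above, so the tier 3 shape is scaled by 500/365, i.e. roughly
        # 1.37 kWh of daily demand spread over the 24 hourly shares.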
def pv_diesel_capacities(pv_capacity, battery_size, diesel_capacity, initital_condition=False):
condition = 1
ren_limit = 0
break_hour = 17
while condition > lpsp:
dod = np.zeros(24)
battery_use = np.zeros(24) # Stores the amount of battery discharge during the day
fuel_result = 0
battery_life = 0
soc = 0.5
unmet_demand = 0
annual_diesel_gen = 0
for i in range(8760):
diesel_gen = 0
battery_use[hour_numbers[i]] = 0.0002 * soc # Battery self-discharge
soc *= 0.9998
t_cell = temp[i] + 0.0256 * ghi[i] # PV cell temperature
pv_gen = pv_capacity * 0.9 * ghi[i] / 1000 * (
1 - k_t * (t_cell - 298.15)) # PV generation in the hour
net_load = load_curve[hour_numbers[i]] - pv_gen # remaining load not met by PV panels
if net_load <= 0: # If pv generation is greater than load the excess energy is stored in battery
if battery_size > 0:
soc -= n_chg * net_load / battery_size
net_load = 0
max_diesel = min(diesel_capacity, net_load + (1 - soc) * battery_size / n_chg)
                    # Maximum amount of diesel needed to supply load and charge battery, limited by rated diesel capacity
# Below is the dispatch strategy for the diesel generator as described in word document
if break_hour + 1 > hour_numbers[i] > 4 and net_load > soc * battery_size * n_dis:
diesel_gen = min(diesel_capacity, max(0.4 * diesel_capacity, net_load))
elif 23 > hour_numbers[i] > break_hour and max_diesel > 0.40 * diesel_capacity:
diesel_gen = max_diesel
elif n_dis * soc * battery_size < net_load:
diesel_gen = max(0.4 * diesel_capacity, max_diesel)
if diesel_gen > 0: # Fuel consumption is stored
fuel_result += diesel_capacity * 0.08145 + diesel_gen * 0.246
annual_diesel_gen += diesel_gen
if (net_load - diesel_gen) > 0: # If diesel generator cannot meet load the battery is also used
if battery_size > 0:
soc -= (net_load - diesel_gen) / n_dis / battery_size
battery_use[hour_numbers[i]] += (net_load - diesel_gen) / n_dis / battery_size
if soc < 0: # If battery and diesel generator cannot supply load there is unmet demand
unmet_demand -= soc * n_dis * battery_size
battery_use[hour_numbers[i]] += soc
soc = 0
else: # If diesel generation is larger than load the excess energy is stored in battery
if battery_size > 0:
soc += (diesel_gen - net_load) * n_chg / battery_size
if battery_size == 0: # If no battery and diesel generation < net load there is unmet demand
unmet_demand += net_load - diesel_gen
soc = min(soc, 1) # Battery state of charge cannot be >1
                    dod[hour_numbers[i]] = 1 - soc  # The depth of discharge in every hour of the day is stored
if hour_numbers[i] == 23 and max(dod) > 0: # The battery wear during the last day is calculated
battery_life += sum(battery_use) / (531.52764 * max(0.1, (max(dod) * dod_max)) ** -1.12297)
condition = unmet_demand / energy_per_hh # lpsp is calculated
if initital_condition: # During the first calculation the minimum PV size with no diesel generator is calculated
if condition > lpsp:
pv_capacity *= (1 + unmet_demand / energy_per_hh / 4)
elif condition > lpsp or (annual_diesel_gen > (1 - ren_limit) * energy_per_hh): # For the remaining configurations the solution is considered unusable if lpsp criteria is not met
diesel_capacity = 99
condition = 0
battery_life = 1
elif condition < lpsp: # If lpsp criteria is met the expected battery life is stored
battery_life = np.round(1 / battery_life)
return pv_capacity, diesel_capacity, battery_size, fuel_result, battery_life
# Initial PV size when no diesel generator is used is calculated and used as reference
ref = pv_diesel_capacities(energy_per_hh / 3000, 2 * energy_per_hh / 365, 0, initital_condition=True)
battery_sizes = [0.3 * energy_per_hh / 365, 0.5 * energy_per_hh / 365, 0.75 * energy_per_hh / 365, energy_per_hh / 365, 2 * energy_per_hh / 365, 0] # [2 * energy_per_hh / 365, energy_per_hh / 365, 0]
ref_battery_size = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_panel_size = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_diesel_cap = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_fuel_result = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_battery_life = np.zeros((len(battery_sizes), pv_no, diesel_no))
# For the number of diesel, pv and battery capacities the lpsp, battery lifetime and fuel usage is calculated
for k in range(len(battery_sizes)):
for i in range(pv_no):
for j in range(diesel_no):
a = pv_diesel_capacities(ref[0] * (pv_no - i) / pv_no, battery_sizes[k],
j * max(load_curve) / diesel_no)
ref_panel_size[k, i, j] = a[0]
ref_diesel_cap[k, i, j] = a[1]
ref_battery_size[k, i, j] = a[2]
ref_fuel_result[k, i, j] = a[3]
ref_battery_life[k, i, j] = min(20, a[4]) # Battery life limited to maximum 20 years
        # Necessary information for calculation of LCOE is defined
project_life = self.end_year - self.start_year
ghi_steps = int(
ceil((max_ghi - 1000) / 50) + 1) # GHI values rounded to nearest 50 are used for reference matrix
diesel_cost_max = 2 * self.diesel_price * self.diesel_truck_consumption * max_travel_hours / self.diesel_truck_volume / LHV_DIESEL
diesel_steps = int(
ceil(diesel_cost_max * 100) + 1) # Diesel values rounded to 0.01 USD used for reference matrix
generation = np.ones(project_life) * energy_per_hh
generation[0] = 0
year = np.arange(project_life)
discount_factor = (1 + self.discount_rate) ** year
investment_table = np.zeros((ghi_steps, diesel_steps)) # Stores least-cost configuration investments
pv_table = np.zeros((ghi_steps, diesel_steps)) # Stores PV size for least-cost configuraton
diesel_table = np.zeros((ghi_steps, diesel_steps)) # Stores diesel capacity for least-cost configuration
lcoe_table = np.ones((ghi_steps, diesel_steps)) * 99 # Stores LCOE for least-cost configuration
choice_table = np.zeros((ghi_steps, diesel_steps))
# For each combination of GHI and diesel price the least costly configuration is calculated by iterating through
# the different configurations specified above
for i in range(ghi_steps):
pv_size = ref_panel_size * ghi.sum() / 1000 / (1000 + 50 * i)
for j in range(diesel_steps):
for k in range(pv_no):
for l in range(diesel_no):
for m in range(len(battery_sizes)):
investments = np.zeros(project_life)
salvage = np.zeros(project_life)
fuel_costs = np.ones(project_life) * ref_fuel_result[m, k, l] * (self.diesel_price + 0.01 * j)
investments[0] = pv_size[m, k, l] * pv_cost + ref_diesel_cap[m, k, l] * diesel_cost
salvage[-1] = ref_diesel_cap[m, k, l] * diesel_cost * (1 - project_life / diesel_life) + \
pv_size[m, k, l] * pv_cost * (1 - project_life / pv_life)
om = np.ones(project_life) * (
pv_size[m, k, l] * pv_cost * pv_om + ref_diesel_cap[m, k, l] * diesel_cost * diesel_om)
if pv_life < project_life:
investments[pv_life] = pv_size[m, k, l] * pv_cost
if diesel_life < project_life:
investments[diesel_life] = ref_diesel_cap[m, k, l] * diesel_cost
for n in range(project_life):
if year[n] % ref_battery_life[m, k, l] == 0:
investments[n] += ref_battery_size[m, k, l] * battery_cost / dod_max
salvage[-1] += (1 - (
(project_life % ref_battery_life[m, k, l]) / ref_battery_life[m, k, l])) * \
battery_cost * ref_battery_size[m, k, l] / dod_max + ref_diesel_cap[
m, k, l] * \
diesel_cost * (1 - (
project_life % diesel_life) / diesel_life) \
+ pv_size[m, k, l] * pv_cost * (1 - (project_life % pv_life) / pv_life)
discount_investments = (investments + fuel_costs - salvage + om) / discount_factor
discount_generation = generation / discount_factor
lcoe = np.sum(discount_investments) / np.sum(discount_generation)
if lcoe < lcoe_table[i, j]:
lcoe_table[i, j] = lcoe
pv_table[i, j] = pv_size[m, k, l]
diesel_table[i, j] = ref_diesel_cap[m, k, l]
investment_table[i, j] = np.sum(discount_investments)
choice_table[i, j] = (l + 1) * 10 + (k + 1) * 10000 + m + 1
# first number is PV size, second is diesel, third is battery
return lcoe_table, pv_table, diesel_table, investment_table, load_curve[19], choice_table
@classmethod
def set_default_values(cls, start_year, end_year, discount_rate, grid_cell_area, mv_line_cost, lv_line_cost,
mv_line_capacity, lv_line_capacity, lv_line_max_length, hv_line_cost, mv_line_max_length,
hv_lv_transformer_cost, mv_increase_rate):
cls.start_year = start_year
cls.end_year = end_year
cls.discount_rate = discount_rate
cls.grid_cell_area = grid_cell_area
cls.mv_line_cost = mv_line_cost
cls.lv_line_cost = lv_line_cost
cls.mv_line_capacity = mv_line_capacity
cls.lv_line_capacity = lv_line_capacity
cls.lv_line_max_length = lv_line_max_length
cls.hv_line_cost = hv_line_cost
cls.mv_line_max_length = mv_line_max_length
cls.hv_lv_transformer_cost = hv_lv_transformer_cost
cls.mv_increase_rate = mv_increase_rate
def get_lcoe(self, energy_per_hh, people, num_people_per_hh, additional_mv_line_length=0, capacity_factor=0,
mv_line_length=0, travel_hours=0, ghi=0, urban=0, get_capacity=False, mini_grid=False, pv=False,
urban_hybrid=0, rural_hybrid=0, get_investment_cost=False, mg_pv=False, mg_wind=False,
mg_hydro=False, mg_diesel=False, mg_hybrid=False):
"""
Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.
The only required parameters are energy_per_hh, people and num_people_per_hh
        additional_mv_line_length required for grid
capacity_factor required for PV and wind
mv_line_length required for hydro
travel_hours required for diesel
"""
if people == 0:
# If there are no people, the investment cost is zero.
if get_investment_cost:
return 0
# Otherwise we set the people low (prevent div/0 error) and continue.
else:
people = 0.00001
# If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
if capacity_factor == 0:
capacity_factor = self.capacity_factor
consumption = people / num_people_per_hh * energy_per_hh # kWh/year
average_load = consumption / (1 - self.distribution_losses) / HOURS_PER_YEAR # kW
if mg_hybrid and urban == 1:
peak_load = urban_hybrid[4] * consumption
# peak_load = people / num_people_per_hh * urban_hybrid[4] * (1 + self.distribution_losses)
elif mg_hybrid and urban == 0:
peak_load = rural_hybrid[4] * consumption
# peak_load = people / num_people_per_hh * rural_hybrid[4] * (1 + self.distribution_losses)
else:
peak_load = average_load / self.base_to_peak_load_ratio # kW
no_mv_lines = peak_load / self.mv_line_capacity
no_lv_lines = peak_load / self.lv_line_capacity
lv_networks_lim_capacity = no_lv_lines / no_mv_lines
lv_networks_lim_length = ((self.grid_cell_area / no_mv_lines) / (self.lv_line_max_length / sqrt(2))) ** 2
actual_lv_lines = min([people / num_people_per_hh, max([lv_networks_lim_capacity, lv_networks_lim_length])])
hh_per_lv_network = (people / num_people_per_hh) / (actual_lv_lines * no_mv_lines)
lv_unit_length = sqrt(self.grid_cell_area / (people / num_people_per_hh)) * sqrt(2) / 2
lv_lines_length_per_lv_network = 1.333 * hh_per_lv_network * lv_unit_length
total_lv_lines_length = no_mv_lines * actual_lv_lines * lv_lines_length_per_lv_network
line_reach = (self.grid_cell_area / no_mv_lines) / (2 * sqrt(self.grid_cell_area / no_lv_lines))
total_length_of_lines = min([line_reach, self.mv_line_max_length]) * no_mv_lines
additional_hv_lines = max(
[0, round(sqrt(self.grid_cell_area) / (2 * min([line_reach, self.mv_line_max_length])) / 10, 3) - 1])
hv_lines_total_length = (sqrt(self.grid_cell_area) / 2) * additional_hv_lines * sqrt(self.grid_cell_area)
num_transformers = additional_hv_lines + no_mv_lines + (no_mv_lines * actual_lv_lines)
generation_per_year = average_load * HOURS_PER_YEAR
# The investment and O&M costs are different for grid and non-grid solutions
        if self.grid_price > 0:
td_investment_cost = hv_lines_total_length * self.hv_line_cost + \
total_length_of_lines * self.mv_line_cost + \
total_lv_lines_length * self.lv_line_cost + \
num_transformers * self.hv_lv_transformer_cost + \
(people / num_people_per_hh) * self.connection_cost_per_hh + \
additional_mv_line_length * (
self.mv_line_cost * (1 + self.mv_increase_rate) **
((additional_mv_line_length / 5) - 1))
td_om_cost = td_investment_cost * self.om_of_td_lines
total_investment_cost = td_investment_cost
total_om_cost = td_om_cost
fuel_cost = self.grid_price
else:
total_lv_lines_length *= 0 if self.standalone else 0.75
mv_total_line_cost = self.mv_line_cost * mv_line_length
lv_total_line_cost = self.lv_line_cost * total_lv_lines_length
td_investment_cost = mv_total_line_cost + lv_total_line_cost + (
people / num_people_per_hh) * self.connection_cost_per_hh
td_om_cost = td_investment_cost * self.om_of_td_lines
installed_capacity = peak_load / capacity_factor
if self.standalone:
if self.diesel_price > 0:
if (installed_capacity / people / num_people_per_hh) < 1:
installed_capacity = 1 * people / num_people_per_hh
if installed_capacity / (people / num_people_per_hh) < 0.020:
capital_investment = installed_capacity * self.capital_cost[0.020]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.020] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.050:
capital_investment = installed_capacity * self.capital_cost[0.050]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.050] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.100:
capital_investment = installed_capacity * self.capital_cost[0.100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.100] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.200:
capital_investment = installed_capacity * self.capital_cost[0.200]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.200] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[0.300]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.300] * self.om_costs * installed_capacity)
elif self.mg_pv:
if installed_capacity < 50:
capital_investment = installed_capacity * self.capital_cost[50]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[50] * self.om_costs * installed_capacity)
elif installed_capacity < 75:
capital_investment = installed_capacity * self.capital_cost[75]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[75] * self.om_costs * installed_capacity)
elif installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[200]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[200] * self.om_costs * installed_capacity)
elif self.mg_wind:
if installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
elif installed_capacity < 1000:
capital_investment = installed_capacity * self.capital_cost[1000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1000] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[10000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[10000] * self.om_costs * installed_capacity)
elif self.mg_hydro:
if installed_capacity < 1:
capital_investment = installed_capacity * self.capital_cost[1]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1] * self.om_costs * installed_capacity)
elif installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[5000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[5000] * self.om_costs * installed_capacity)
elif self.mg_diesel:
if installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
elif installed_capacity < 1000:
capital_investment = installed_capacity * self.capital_cost[1000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1000] * self.om_costs * installed_capacity)
elif installed_capacity < 5000:
capital_investment = installed_capacity * self.capital_cost[5000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[5000] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[25000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[25000] * self.om_costs * installed_capacity)
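# Hybrid mini-grids use a single scalar capital cost (self.capital_cost) rather than the capacity-banded lookup used for the other technologies above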
elif mg_hybrid:
capital_investment = installed_capacity * self.capital_cost
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost * self.om_costs * installed_capacity)
# If a diesel price has been passed, the technology is diesel
if self.diesel_price > 0 and not mg_hybrid:
# And we apply the Szabo formula to calculate the transport cost for the diesel
# p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * travel_hours /
self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
# Otherwise it's hydro/wind etc with no fuel cost
else:
fuel_cost = 0
# Perform the time-value LCOE calculation
project_life = self.end_year - self.start_year
reinvest_year = 0
# If the technology life is less than the project life, we will have to invest twice to buy it again
if self.tech_life < project_life:
reinvest_year = self.tech_life
year = np.arange(project_life)
el_gen = generation_per_year * np.ones(project_life)
el_gen[0] = 0
discount_factor = (1 + self.discount_rate) ** year
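# Year-by-year discount factors: a cost or kWh in year t is divided by (1 + discount_rate)**t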
investments = np.zeros(project_life)
investments[0] = total_investment_cost
if reinvest_year:
investments[reinvest_year] = total_investment_cost
salvage = np.zeros(project_life)
used_life = project_life
if reinvest_year:
# so salvage will come from the remaining life after the re-investment
used_life = project_life - self.tech_life
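# Credit a straight-line salvage value in the final year for the unused fraction of the last investment's technical life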
salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)
operation_and_maintenance = total_om_cost * np.ones(project_life)
operation_and_maintenance[0] = 0
fuel = el_gen * fuel_cost
fuel[0] = 0
if mg_hybrid:
diesel_lookup = int(round(2 * self.diesel_price * self.diesel_truck_consumption *
travel_hours / self.diesel_truck_volume / LHV_DIESEL * 100))
renewable_lookup = int(round((ghi - 1000) / 50))
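# Indices into the pre-computed hybrid reference tables: the diesel axis steps over the diesel transport-cost term (scaled by 100),
# the renewable axis over GHI in 50-unit steps above 1000; only PV hybrids are handled below, with separate urban and rural tables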
if urban == 1 and pv:
ref_table = urban_hybrid[0]
ref_investments = urban_hybrid[3]
ref_capacity = urban_hybrid[1] + urban_hybrid[2]
elif urban == 0 and pv:
ref_table = rural_hybrid[0]
ref_investments = rural_hybrid[3]
ref_capacity = rural_hybrid[1] + rural_hybrid[2]
add_lcoe = ref_table[renewable_lookup, diesel_lookup]
add_investments = ref_investments[renewable_lookup, diesel_lookup] * people / num_people_per_hh
add_capacity = ref_capacity[renewable_lookup, diesel_lookup] * people / num_people_per_hh
# So we also return the total investment cost for this number of people
if get_investment_cost:
discounted_investments = investments / discount_factor
if mini_grid:
return add_investments + np.sum(discounted_investments)
else:
return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
# return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
elif get_capacity:
return add_capacity
else:
discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
discounted_generation = el_gen / discount_factor
if mini_grid:
return np.sum(discounted_costs) / np.sum(discounted_generation) + add_lcoe
else:
return np.sum(discounted_costs) / np.sum(discounted_generation)
# return np.sum(discounted_costs) / np.sum(discounted_generation)
def get_grid_table(self, energy_per_hh, num_people_per_hh, max_dist):
"""
Uses get_lcoe to generate a 2D grid with the grid LCOEs, for faster access in the electrification algorithm
"""
logging.info('Creating a grid table for {} kWh/hh/year'.format(energy_per_hh))
# Coarser resolution at the high end (just to catch the few places with exceptional population density)
# The electrification algorithm must round off with the same scheme
people_arr_direct = list(range(1000)) + list(range(1000, 10000, 10)) + list(range(10000, 350000, 1000))
elec_dists = range(0, int(max_dist) + 20) # add twenty to handle edge cases
grid_lcoes = pd.DataFrame(index=elec_dists, columns=people_arr_direct)
for people in people_arr_direct:
for additional_mv_line_length in elec_dists:
grid_lcoes[people][additional_mv_line_length] = self.get_lcoe(
energy_per_hh=energy_per_hh,
people=people,
num_people_per_hh=num_people_per_hh,
additional_mv_line_length=additional_mv_line_length)
return grid_lcoes.to_dict()
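# Hypothetical usage sketch (names assumed): the nested dict returned by get_grid_table is keyed
# first by population, then by additional MV line length, e.g.
#   grid_lcoes = grid_tech.get_grid_table(energy_per_hh=1000, num_people_per_hh=5, max_dist=50)
#   lcoe = grid_lcoes[people][additional_mv_line_length]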
class SettlementProcessor:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, path):
try:
self.df = pd.read_csv(path)
except FileNotFoundError:
print('Could not find the calibrated and prepped csv file')
raise
try:
self.df[SET_GHI]
except KeyError:
self.df = pd.read_csv(path, sep=';')
try:
self.df[SET_GHI]
except KeyError:
print('Column "GHI" not found, check column names in calibrated csv-file')
raise
def condition_df(self):
"""
Do any initial data conditioning that may be required.
"""
logging.info('Ensure that columns that are supposed to be numeric are numeric')
self.df[SET_GHI] = pd.to_numeric(self.df[SET_GHI], errors='coerce')
self.df[SET_WINDVEL] = pd.to_numeric(self.df[SET_WINDVEL], errors='coerce')
self.df[SET_NIGHT_LIGHTS] = pd.to_numeric(self.df[SET_NIGHT_LIGHTS], errors='coerce')
self.df[SET_ELEVATION] = pd.to_numeric(self.df[SET_ELEVATION], errors='coerce')
import os
import time
import asyncio
import redis
import pandas as pd
from bs4 import BeautifulSoup
from multiprocessing import Pool
PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + "/dataset"
URI = "http://finance.naver.com/item/sise_day.nhn?code={}&page={}"
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def parse_html(code):
print('Start {}'.format(code))
dfs = []
for page in get_last_page(code):
df = pd.read_html(URI.format(code, page), header=0)[0]
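# Rename the Korean column headers from the Naver daily price table to English OHLCV names (date, close, day-over-day change, open, high, low, volume)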
df = df.rename(columns={
'날짜': 'date',
'종가': 'close',
'전일비': 'diff',
'시가': 'open',
'고가': 'high',
'저가': 'low',
'거래량': 'volume'
})
df['date'] = df['date'].apply(lambda d: str(pd.to_datetime(d)))
import param
import panel as pn
import holoviews as hv
import numpy as np
import pandas as pd
from holoviews import opts
import geopandas as gpd
from shapely.geometry import box
from importlib import resources
pn.extension()
def debug(msg):
# print(msg)
pass
class OverviewApp(param.Parameterized):
metrics_path = param.String("PDR2_metrics.parq")
skymap_path = param.String("deepCoadd_skyMap.csv")
hidden_tract = param.String("")
metric = param.ObjectSelector()
filter_ = param.ObjectSelector()
selected_tract_str = param.String(default="", label="Selected tracts")
plot_width = param.Integer(default=800, bounds=(1, None))
plot_height = param.Integer(default=400, bounds=(1, None))
def __init__(self, tracts_update_callback, active_tracts=None, **params):
debug("OverviewApp.__init__()")
# Declare Tap stream (source polygon element to be defined later)
self.stream = hv.streams.Selection1D()
super().__init__(**params)
# set up default empty objects
self.df = pd.DataFrame()
self.rangexy = hv.streams.RangeXY()
self.tracts_update_callback = tracts_update_callback
self._active_tracts = active_tracts
# load the skmap and metrics data
self.load_data()
@param.output()
def output(self):
"""Output the list of selected tracts."""
debug("OverviewApp.output()")
# return an empty list if no tracts selected
if self.stream.index == [""]:
return list()
else:
# return the tract list from the widget (which matches the selection)
return self.tracts_in_widget()
def load_data(self):
"""Load the source files, reorganize the data, and set up widget options."""
debug("OverviewApp.load_data()")
data_package = "lsst_dashboard.data"
# load the metrics data
with resources.path(data_package, self.metrics_path) as path:
self.metrics_df = pd.read_parquet(path)
"""
Creation of dataset *.csv files for the fastmri dataset.
Copyright (c) 2019 <NAME> <k.hammernik at imperial dot ac dot uk>
Department of Computing, Imperial College London, London, United Kingdom
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import pandas as pd
import h5py
import xmltodict
import pprint
import argparse
import pathlib
import logging
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-path', type=pathlib.Path, required=True,
help='Path to the dataset',
)
parser.add_argument(
'--csv-path', type=pathlib.Path, required=True,
help='Path to the csv files',
)
parser.add_argument(
'--dataset', type=str, required=True,
help='Dataset for which the csv file should be generated.'
)
args = parser.parse_args()
image_type = '.h5'
print(f'Create dataset file for {args.dataset}')
output_name = f'{args.dataset}.csv'
# generate the file names
image_names = sorted([os.path.join(args.dataset, f) for f in os.listdir(os.path.join(args.data_path, args.dataset)) if f.endswith(image_type)])
# init dicts for infos that will be extracted from the dataset
img_info = {'filename' : image_names, 'type' : []}
acq_info = {'systemVendor' : [], 'systemModel' : [], 'systemFieldStrength_T' : [], 'receiverChannels' : [], 'institutionName' : [] }
seq_info = {'TR' : [] , 'TE' : [], 'TI': [], 'flipAngle_deg': [], 'sequence_type': [], 'echo_spacing': []}
enc_info = {'enc_x' : [], 'enc_y' : [], 'enc_z' : [], \
'rec_x' : [], 'rec_y' : [], 'rec_z' : [], \
'enc_x_mm' : [], 'enc_y_mm' : [], 'enc_z_mm' : [],
'rec_x_mm' : [], 'rec_y_mm' : [], 'rec_z_mm' : [],
'nPE' : []}
acc_info = {'acc' : [], 'num_low_freq' : []}
for fname in tqdm(image_names):
dset = h5py.File(os.path.join(args.data_path, fname),'r')
img_info['type'].append(dset.attrs['acquisition'])
acc_info['acc'].append(dset.attrs['acceleration'] if 'acceleration' in dset.attrs.keys() else 0)
acc_info['num_low_freq'].append(dset.attrs['num_low_frequency'] if 'num_low_frequency' in dset.attrs.keys() else 0)
header_xml = dset['ismrmrd_header'][()]
header = xmltodict.parse(header_xml)['ismrmrdHeader']
#pprint.pprint(header)
for key in acq_info.keys():
acq_info[key].append(header['acquisitionSystemInformation'][key])
for key in seq_info.keys():
if key in header['sequenceParameters']:
seq_info[key].append(header['sequenceParameters'][key])
else:
seq_info[key].append('n/a')
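# Number of phase-encoding lines: the encoding limits are zero-based, so nPE = max index + 1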
enc_info['nPE'].append(int(header['encoding']['encodingLimits']['kspace_encoding_step_1']['maximum'])+1)
if int(header['encoding']['encodingLimits']['kspace_encoding_step_1']['minimum']) != 0:
raise ValueError('kspace_encoding_step_1 minimum is expected to be 0')
for diridx in ['x', 'y', 'z']:
enc_info[f'enc_{diridx}'].append(header['encoding']['encodedSpace']['matrixSize'][diridx])
enc_info[f'rec_{diridx}'].append(header['encoding']['reconSpace']['matrixSize'][diridx])
enc_info[f'enc_{diridx}_mm'].append(header['encoding']['encodedSpace']['fieldOfView_mm'][diridx])
enc_info[f'rec_{diridx}_mm'].append(header['encoding']['reconSpace']['fieldOfView_mm'][diridx])
data_info = {**img_info, **acq_info, **enc_info, **acc_info, **seq_info}
# convert to pandas
df = pd.DataFrame(data_info)
# Copyright (c) 2018, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
import cudf
from cudf.core import DataFrame, Series
from cudf.tests.utils import assert_eq
_now = np.datetime64("now")
_tomorrow = _now + np.timedelta64(1, "D")
_now = np.int64(_now.astype("datetime64[ns]"))
_tomorrow = np.int64(_tomorrow.astype("datetime64[ns]"))
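# Keep the bounds as int64 nanoseconds so np.random.randint can sample uniform datetimes between them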
def make_frame(
dataframe_class,
nelem,
seed=0,
extra_levels=(),
extra_vals=(),
with_datetime=False,
):
np.random.seed(seed)
df = dataframe_class()
df["x"] = np.random.randint(0, 5, nelem)
df["y"] = np.random.randint(0, 3, nelem)
for lvl in extra_levels:
df[lvl] = np.random.randint(0, 2, nelem)
df["val"] = np.random.random(nelem)
for val in extra_vals:
df[val] = np.random.random(nelem)
if with_datetime:
df["datetime"] = np.random.randint(
_now, _tomorrow, nelem, dtype=np.int64
).astype("datetime64[ns]")
return df
def get_methods():
for method in ["cudf", "hash"]:
yield method
def get_nelem():
for elem in [2, 3, 1000]:
yield elem
@pytest.fixture
def gdf():
return DataFrame({"x": [1, 2, 3], "y": [0, 1, 1]})
@pytest.fixture
def pdf(gdf):
return gdf.to_pandas()
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_as_index_single_agg(pdf, gdf, as_index):
gdf = gdf.groupby("y", as_index=as_index).agg({"x": "mean"})
pdf = pdf.groupby("y", as_index=as_index).agg({"x": "mean"})
assert_eq(pdf, gdf)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_as_index_multiindex(pdf, gdf, as_index):
pdf = pd.DataFrame(
{"a": [1, 2, 1], "b": [3, 3, 3], "c": [2, 2, 3], "d": [3, 1, 2]}
)
gdf = cudf.from_pandas(pdf)
gdf = gdf.groupby(["a", "b"], as_index=as_index).agg({"c": "mean"})
pdf = pdf.groupby(["a", "b"], as_index=as_index).agg({"c": "mean"})
if as_index:
assert_eq(pdf, gdf)
else:
# column names don't match - check just the values
for gcol, pcol in zip(gdf, pdf):
assert_array_equal(gdf[gcol].to_array(), pdf[pcol].values)
def test_groupby_default(pdf, gdf):
gdf = gdf.groupby("y").agg({"x": "mean"})
pdf = pdf.groupby("y").agg({"x": "mean"})
assert_eq(pdf, gdf)
def test_group_keys_true(pdf, gdf):
gdf = gdf.groupby("y", group_keys=True).sum()
pdf = pdf.groupby("y", group_keys=True).sum()
assert_eq(pdf, gdf)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_getitem_getattr(as_index):
pdf = pd.DataFrame({"x": [1, 3, 1], "y": [1, 2, 3], "z": [1, 4, 5]})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.groupby("x")["y"].sum(), gdf.groupby("x")["y"].sum())
assert_eq(pdf.groupby("x").y.sum(), gdf.groupby("x").y.sum())
assert_eq(pdf.groupby("x")[["y"]].sum(), gdf.groupby("x")[["y"]].sum())
assert_eq(
pdf.groupby(["x", "y"], as_index=as_index).sum(),
gdf.groupby(["x", "y"], as_index=as_index).sum(),
)
@pytest.mark.parametrize("nelem", get_nelem())
@pytest.mark.parametrize("method", get_methods())
def test_groupby_mean(nelem, method):
got_df = (
make_frame(DataFrame, nelem=nelem)
.groupby(["x", "y"], method=method)
.mean()
)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).mean()
)
if method == "cudf":
got = np.sort(got_df["val"].to_array())
expect = np.sort(expect_df["val"].values)
np.testing.assert_array_almost_equal(expect, got)
else:
assert_eq(got_df, expect_df)
@pytest.mark.parametrize("nelem", get_nelem())
@pytest.mark.parametrize("method", get_methods())
def test_groupby_mean_3level(nelem, method):
lvls = "z"
bys = list("xyz")
got_df = (
make_frame(DataFrame, nelem=nelem, extra_levels=lvls)
.groupby(bys, method=method)
.mean()
)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem, extra_levels=lvls)
.groupby(bys)
.mean()
)
if method == "cudf":
got = np.sort(got_df["val"].to_array())
expect = np.sort(expect_df["val"].values)
np.testing.assert_array_almost_equal(expect, got)
else:
assert_eq(got_df, expect_df)
@pytest.mark.parametrize("nelem", get_nelem())
@pytest.mark.parametrize("method", get_methods())
def test_groupby_agg_mean_min(nelem, method):
got_df = (
make_frame(DataFrame, nelem=nelem)
.groupby(["x", "y"], method=method)
.agg(["mean", "min"])
)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem)
.groupby(["x", "y"])
.agg(["mean", "min"])
)
if method == "cudf":
got_mean = np.sort(got_df["val_mean"].to_array())
got_min = np.sort(got_df["val_min"].to_array())
expect_mean = np.sort(expect_df["val", "mean"].values)
expect_min = np.sort(expect_df["val", "min"].values)
# verify
np.testing.assert_array_almost_equal(expect_mean, got_mean)
np.testing.assert_array_almost_equal(expect_min, got_min)
else:
assert_eq(expect_df, got_df)
@pytest.mark.parametrize("nelem", get_nelem())
@pytest.mark.parametrize("method", get_methods())
def test_groupby_agg_min_max_dictargs(nelem, method):
got_df = (
make_frame(DataFrame, nelem=nelem, extra_vals="ab")
.groupby(["x", "y"], method=method)
.agg({"a": "min", "b": "max"})
)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem, extra_vals="ab")
.groupby(["x", "y"])
.agg({"a": "min", "b": "max"})
)
if method == "cudf":
got_min = np.sort(got_df["a"].to_array())
got_max = np.sort(got_df["b"].to_array())
expect_min = np.sort(expect_df["a"].values)
expect_max = np.sort(expect_df["b"].values)
# verify
np.testing.assert_array_almost_equal(expect_min, got_min)
np.testing.assert_array_almost_equal(expect_max, got_max)
else:
assert_eq(expect_df, got_df)
@pytest.mark.parametrize("method", get_methods())
def test_groupby_cats(method):
df = DataFrame()
df["cats"] = pd.Categorical(list("aabaacaab"))
df["vals"] = np.random.random(len(df))
cats = np.asarray(list(df["cats"]))
vals = df["vals"].to_array()
grouped = df.groupby(["cats"], method=method, as_index=False).mean()
got_vals = grouped["vals"]
got_cats = grouped["cats"]
for c, v in zip(got_cats, got_vals):
print(c, v)
expect = vals[cats == c].mean()
np.testing.assert_almost_equal(v, expect)
def test_groupby_iterate_groups():
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
def assert_values_equal(arr):
np.testing.assert_array_equal(arr[0], arr)
for grp in df.groupby(["key1", "key2"], method="cudf"):
pddf = grp.to_pandas()
for k in "key1,key2".split(","):
assert_values_equal(pddf[k].values)
def test_groupby_as_df():
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
def assert_values_equal(arr):
np.testing.assert_array_equal(arr[0], arr)
df, segs = df.groupby(["key1", "key2"], method="cudf").as_df()
for s, e in zip(segs, list(segs[1:]) + [None]):
grp = df[s:e]
pddf = grp.to_pandas()
for k in "key1,key2".split(","):
assert_values_equal(pddf[k].values)
def test_groupby_apply():
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
expect_grpby = df.to_pandas().groupby(["key1", "key2"], as_index=False)
got_grpby = df.groupby(["key1", "key2"], method="cudf")
def foo(df):
df["out"] = df["val1"] + df["val2"]
return df
expect = expect_grpby.apply(foo)
expect = expect.sort_values(["key1", "key2"]).reset_index(drop=True)
got = got_grpby.apply(foo).to_pandas()
pd.util.testing.assert_frame_equal(expect, got)
def test_groupby_apply_grouped():
from numba import cuda
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
expect_grpby = df.to_pandas().groupby(["key1", "key2"], as_index=False)
got_grpby = df.groupby(["key1", "key2"], method="cudf")
def foo(key1, val1, com1, com2):
for i in range(cuda.threadIdx.x, len(key1), cuda.blockDim.x):
com1[i] = key1[i] * 10000 + val1[i]
com2[i] = i
got = got_grpby.apply_grouped(
foo,
incols=["key1", "val1"],
outcols={"com1": np.float64, "com2": np.int32},
tpb=8,
)
got = got.to_pandas()
# Get expected result by emulating the operation in pandas
def emulate(df):
df["com1"] = df.key1 * 10000 + df.val1
df["com2"] = np.arange(len(df), dtype=np.int32)
return df
expect = expect_grpby.apply(emulate)
expect = expect.sort_values(["key1", "key2"]).reset_index(drop=True)
pd.util.testing.assert_frame_equal(expect, got)
@pytest.mark.parametrize("nelem", [100, 500])
@pytest.mark.parametrize(
"func", ["mean", "std", "var", "min", "max", "count", "sum"]
)
@pytest.mark.parametrize("method", get_methods())
def test_groupby_cudf_2keys_agg(nelem, func, method):
# skip unimplemented aggs:
if func in ["var", "std"]:
if method in ["hash", "sort"]:
pytest.skip()
got_df = (
make_frame(DataFrame, nelem=nelem)
.groupby(["x", "y"], method=method)
.agg(func)
)
got_agg = np.sort(got_df["val"].to_array())
# pandas
expect_df = (
make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func)
)
if method == "cudf":
expect_agg = np.sort(expect_df["val"].values)
# verify
np.testing.assert_array_almost_equal(expect_agg, got_agg)
else:
check_dtype = False if func == "count" else True
assert_eq(got_df, expect_df, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_series_groupby(agg):
s = pd.Series([1, 2, 3])
g = Series([1, 2, 3])
sg = s.groupby(s // 2)
gg = g.groupby(g // 2)
sa = getattr(sg, agg)()
ga = getattr(gg, agg)()
check_dtype = False if agg == "count" else True
assert_eq(sa, ga, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_series_groupby_agg(agg):
s = pd.Series([1, 2, 3])
g = Series([1, 2, 3])
sg = s.groupby(s // 2).agg(agg)
gg = g.groupby(g // 2).agg(agg)
check_dtype = False if agg == "count" else True
assert_eq(sg, gg, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_groupby_level_zero(agg):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[0, 1, 1])
gdf = DataFrame.from_pandas(pdf)
pdg = pdf.groupby(level=0)
gdg = gdf.groupby(level=0)
pdresult = getattr(pdg, agg)()
gdresult = getattr(gdg, agg)()
check_dtype = False if agg == "count" else True
assert_eq(pdresult, gdresult, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_groupby_series_level_zero(agg):
pdf = pd.Series([1, 2, 3], index=[0, 1, 1])
gdf = Series.from_pandas(pdf)
pdg = pdf.groupby(level=0)
gdg = gdf.groupby(level=0)
pdresult = getattr(pdg, agg)()
gdresult = getattr(gdg, agg)()
check_dtype = False if agg == "count" else True
assert_eq(pdresult, gdresult, check_dtype=check_dtype)
def test_groupby_column_name():
pdf = pd.DataFrame({"xx": [1.0, 2.0, 3.0], "yy": [1, 2, 3]})
gdf = DataFrame.from_pandas(pdf)
g = gdf.groupby("yy")
p = pdf.groupby("yy")
gxx = g["xx"].sum()
pxx = p["xx"].sum()
assert_eq(pxx, gxx)
def test_groupby_column_numeral():
pdf = pd.DataFrame({0: [1.0, 2.0, 3.0], 1: [1, 2, 3]})
gdf = DataFrame.from_pandas(pdf)
p = pdf.groupby(1)
g = gdf.groupby(1)
pxx = p[0].sum()
gxx = g[0].sum()
assert_eq(pxx, gxx)
pdf = pd.DataFrame({0.5: [1.0, 2.0, 3.0], 1.5: [1, 2, 3]})
gdf = DataFrame.from_pandas(pdf)
p = pdf.groupby(1.5)
g = gdf.groupby(1.5)
pxx = p[0.5].sum()
gxx = g[0.5].sum()
assert_eq(pxx, gxx)
@pytest.mark.parametrize(
"series",
[[0, 1, 0], [1, 1, 1], [0, 1, 1], [1, 2, 3], [4, 3, 2], [0, 2, 0]],
) # noqa: E501
def test_groupby_external_series(series):
pdf = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]})
gdf = DataFrame.from_pandas(pdf)
pxx = pdf.groupby(pd.Series(series)).x.sum()
gxx = gdf.groupby(cudf.Series(series)).x.sum()
assert_eq(pxx, gxx)
@pytest.mark.parametrize("series", [[0.0, 1.0], [1.0, 1.0, 1.0, 1.0]])
def test_groupby_external_series_incorrect_length(series):
pdf = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]})
gdf = DataFrame.from_pandas(pdf)
pxx = pdf.groupby(pd.Series(series)).x.sum()
gxx = gdf.groupby(cudf.Series(series)).x.sum()
assert_eq(pxx, gxx)
@pytest.mark.parametrize(
"level", [0, 1, "a", "b", [0, 1], ["a", "b"], ["a", 1], -1, [-1, -2]]
)
def test_groupby_levels(level):
idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (2, 2)], names=("a", "b"))
pdf = pd.DataFrame({"c": [1, 2, 3], "d": [2, 3, 4]}, index=idx)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.groupby(level=level).sum(), gdf.groupby(level=level).sum())
def test_advanced_groupby_levels():
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1, 2, 1], "z": [1, 1, 1]})
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby(["x", "y"]).sum()
gdg = gdf.groupby(["x", "y"]).sum()
assert_eq(pdg, gdg)
pdh = pdg.groupby(level=1).sum()
gdh = gdg.groupby(level=1).sum()
assert_eq(pdh, gdh)
pdg = pdf.groupby(["x", "y", "z"]).sum()
gdg = gdf.groupby(["x", "y", "z"]).sum()
pdg = pdf.groupby(["z"]).sum()
gdg = gdf.groupby(["z"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["y", "z"]).sum()
gdg = gdf.groupby(["y", "z"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["x", "z"]).sum()
gdg = gdf.groupby(["x", "z"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["y"]).sum()
gdg = gdf.groupby(["y"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["x"]).sum()
gdg = gdf.groupby(["x"]).sum()
assert_eq(pdg, gdg)
pdh = pdg.groupby(level=0).sum()
gdh = gdg.groupby(level=0).sum()
assert_eq(pdh, gdh)
pdg = pdf.groupby(["x", "y"]).sum()
gdg = gdf.groupby(["x", "y"]).sum()
pdh = pdg.groupby(level=[0, 1]).sum()
gdh = gdg.groupby(level=[0, 1]).sum()
assert_eq(pdh, gdh)
pdh = pdg.groupby(level=[1, 0]).sum()
gdh = gdg.groupby(level=[1, 0]).sum()
assert_eq(pdh, gdh)
pdg = pdf.groupby(["x", "y"]).sum()
gdg = gdf.groupby(["x", "y"]).sum()
with pytest.raises(IndexError) as raises:
pdh = pdg.groupby(level=2).sum()
raises.match("Too many levels")
with pytest.raises(IndexError) as raises:
gdh = gdg.groupby(level=2).sum()
# we use a different error message
raises.match("Invalid level number")
assert_eq(pdh, gdh)
@pytest.mark.parametrize(
"func",
[
lambda df: df.groupby(["x", "y", "z"]).sum(),
lambda df: df.groupby(["x", "y"]).sum(),
lambda df: df.groupby(["x", "y"]).agg("sum"),
lambda df: df.groupby(["y"]).sum(),
lambda df: df.groupby(["y"]).agg("sum"),
lambda df: df.groupby(["x"]).sum(),
lambda df: df.groupby(["x"]).agg("sum"),
lambda df: df.groupby(["x", "y"]).z.sum(),
lambda df: df.groupby(["x", "y"]).z.agg("sum"),
],
)
def test_empty_groupby(func):
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
gdf = cudf.from_pandas(pdf)
assert_eq(func(pdf), func(gdf), check_index_type=False)
def test_groupby_unsupported_columns():
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(np.random.choice(["a", "b", 1], 3), dtype="category")
)
pdf = pd.DataFrame(
{
"x": [1, 2, 3],
"y": ["a", "b", "c"],
"z": ["d", "e", "f"],
"a": [3, 4, 5],
}
)
pdf["b"] = pd_cat
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby("x").sum()
gdg = gdf.groupby("x").sum()
assert_eq(pdg, gdg)
def test_list_of_series():
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1, 2, 1]})
# Author: <NAME>, <NAME>, <NAME>
# Date: 2020/11/27
"""Compare the performance of different classifiers and train the best model given cross-validation results.
Usage: src/clf_comparison.py <input_file> <input_file1> <output_file> <output_file1>
Options:
<input_file> Path (including filename and file extension) to transformed train file
<input_file1> Path (including filename and file extension) to transformed test file
<output_file> Path (including filename and file extension) to cross validate result file
<output_file1> Path (including filename and file extension) to store untuned model predictions
"""
#import packages
from docopt import docopt
import pandas as pd
import sys
import os
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import (
cross_validate,
GridSearchCV,
RandomizedSearchCV
)
from joblib import dump, load
from sklearn.metrics import f1_score, make_scorer
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
opt = docopt(__doc__)
def main(input_file, input_file1, output_file, output_file1):
# read train_df.csv
train = pd.read_csv(input_file)
test = pd.read_csv(input_file1)
# create split the train_df
X_train, y_train = train.drop(columns=["quality_level"]), train["quality_level"]
X_test, y_test = test.drop(columns=["quality_level"]), test["quality_level"]
# check if target folder exists
try:
os.makedirs(os.path.dirname(output_file))
except FileExistsError:
pass
# define classifiers
classifiers = {
"Logistic_Regression": LogisticRegression(random_state = 123, class_weight = 'balanced'),
"Random_Forest": RandomForestClassifier(random_state = 123, class_weight = 'balanced'),
"DummyClassifier": DummyClassifier(random_state = 123),
"SVC" : SVC(random_state = 123, class_weight = 'balanced'),
"K_Nearest_Neighbors": KNeighborsClassifier()
}
f1 = make_scorer(f1_score, average = 'weighted', labels = ['Excellent'])
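# Scorer: restricting labels to ['Excellent'] makes the weighted F1 reduce to the F1 of the 'Excellent' class, the class of interest for model selection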
def score_with_metrics(models, scoring=f1):
"""
Return cross-validation scores for given models as a dataframe.
Parameters
----------
models : dict
a dictionary with names and scikit-learn models
scoring : list/dict/string
scoring parameter values for cross-validation
Returns
----------
pandas.DataFrame of mean train and cross-validation scores per model
"""
results_df = {}
for (name, model) in models.items():
clf = model
scores = cross_validate(
clf, X_train, y_train, return_train_score=True, scoring=scoring
)
df = pd.DataFrame(scores)
results_df[name] = df.mean()
clf.fit(X_train, y_train)
# save the model
dump(clf, 'results/'+name+'.joblib')
return pd.DataFrame(results_df)
res = score_with_metrics(classifiers)
res = res.transpose()
best_model = res.idxmax()['test_score']
best_clf = classifiers[best_model]
best_clf.fit(X_train, y_train)
pred = best_clf.predict(X_test)
test_scores = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
best_score = pd.DataFrame({'Model': [best_model], 'Test_Score':[test_scores]})
res.to_csv(output_file, index = True)
best_score.to_csv(output_file1, index = False)
# perform hyperparameter tuning on two of the best models
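# Search spaces: number of trees and maximum depth for the random forest, inverse regularisation strength C for logistic regression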
param_RF = {'n_estimators':[int(i) for i in np.linspace(start = 100, stop = 1000, num = 10).tolist()],
'max_depth':[int(i) for i in np.linspace(start = 10, stop = 1000, num = 100).tolist()]}
param_log = {
"C": [0.0001, 0.001, 0.01, 0.1, 1.0, 10, 100, 1000]}
rf_search = RandomizedSearchCV(classifiers['Random_Forest'],
param_RF, cv = 5,
n_jobs = -1,
scoring = f1,
n_iter = 20, random_state = 123)
log_search = GridSearchCV(classifiers['Logistic_Regression'],
param_log, cv = 5,
n_jobs = -1,
scoring = f1
)
rf_search.fit(X_train, y_train)
log_search.fit(X_train, y_train)
rf_best = rf_search.best_estimator_
log_best = log_search.best_estimator_
tuned_results = {}
rf_score = cross_validate(rf_best, X_train, y_train, return_train_score=True, scoring=f1)
log_score = cross_validate(log_best, X_train, y_train, return_train_score=True, scoring=f1)
tuned_results['Random Forest'] = pd.DataFrame(rf_score).mean()
tuned_results['Logistic Regression'] = pd.DataFrame(log_score).mean()
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open as io_open
from builtins import str
from future import standard_library
standard_library.install_aliases()
__all__ = [
'generate_xref_descriptions',
'generate_files_for_reinsurance',
'ReinsuranceLayer',
'write_ri_input_files'
]
import json
import logging
import math
import os
import shutil
import subprocess32 as subprocess
from collections import namedtuple
from itertools import product
import anytree
import numbers
import pandas as pd
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from . import oed
from six import string_types
# Metadata about an inuring layer
InuringLayer = namedtuple(
"InuringLayer",
"inuring_priority reins_numbers is_valid validation_messages")
def _get_location_tiv(location, coverage_type_id):
switcher = {
oed.BUILDING_COVERAGE_TYPE_ID: location.get('BuildingTIV', 0),
oed.OTHER_BUILDING_COVERAGE_TYPE_ID: location.get('OtherTIV', 0),
oed.CONTENTS_COVERAGE_TYPE_ID: location.get('ContentsTIV', 0),
oed.TIME_COVERAGE_TYPE_ID: location.get('BITIV', 0)
}
return switcher.get(coverage_type_id, 0)
def generate_xref_descriptions(accounts_fp, locations_fp):
accounts = pd.read_csv(accounts_fp)
locations = pd.read_csv(locations_fp)
coverage_id = 0
item_id = 0
group_id = 0
policy_agg_id = 0
profile_id = 0
site_agg_id = 0
accounts_and_locations = pd.merge(accounts, locations, left_on='AccNumber', right_on='AccNumber')
for acc_and_loc, coverage_type, peril in product((acc for _, acc in accounts_and_locations.iterrows()), oed.COVERAGE_TYPES, oed.PERILS):
tiv = _get_location_tiv(acc_and_loc, coverage_type)
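# Only locations with a positive TIV for this coverage type produce an xref row (the running IDs are only incremented inside this branch)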
if tiv > 0:
policy_agg_id += 1
profile_id += 1
group_id += 1
site_agg_id += 1
profile_id += 1
coverage_id += 1
item_id += 1
yield oed.XrefDescription(
xref_id = item_id,
account_number = acc_and_loc.get('AccNumber'),
location_number = acc_and_loc.get('LocNumber'),
location_group = acc_and_loc.get('LocGroup'),
cedant_name = acc_and_loc.get('CedantName'),
producer_name = acc_and_loc.get('ProducerName'),
lob = acc_and_loc.get('LOB'),
country_code = acc_and_loc.get('CountryCode'),
reins_tag = acc_and_loc.get('ReinsTag'),
coverage_type_id = coverage_type,
peril_id = peril,
policy_number = acc_and_loc.get('PolNumber'),
portfolio_number = acc_and_loc.get('PortNumber'),
tiv = tiv
)
@oasis_log
def generate_files_for_reinsurance(
items,
coverages,
fm_xrefs,
xref_descriptions,
ri_info_df,
ri_scope_df,
direct_oasis_files_dir,
gulsummaryxref=pd.DataFrame(),
fmsummaryxref=pd.DataFrame()):
"""
Generate files for reinsurance.
"""
inuring_metadata = {}
previous_inuring_priority = None
previous_risk_level = None
reinsurance_index = 1
for inuring_priority in range(1, ri_info_df['InuringPriority'].max() + 1):
# Filter the reinsNumbers by inuring_priority
reins_numbers = ri_info_df[ri_info_df['InuringPriority'] == inuring_priority].ReinsNumber.tolist()
risk_level_set = set(ri_scope_df[ri_scope_df['ReinsNumber'].isin(reins_numbers)].RiskLevel)
for risk_level in oed.REINS_RISK_LEVELS:
if risk_level not in risk_level_set:
continue
written_to_dir = _generate_files_for_reinsurance_risk_level(
inuring_priority,
items,
coverages,
fm_xrefs,
xref_descriptions,
gulsummaryxref,
fmsummaryxref,
ri_info_df,
ri_scope_df,
previous_inuring_priority,
previous_risk_level,
risk_level,
reinsurance_index,
direct_oasis_files_dir)
inuring_metadata[reinsurance_index] = {
'inuring_priority': inuring_priority,
'risk_level': risk_level,
'directory': written_to_dir,
}
previous_inuring_priority = inuring_priority
previous_risk_level = risk_level
reinsurance_index = reinsurance_index + 1
return inuring_metadata
def _generate_files_for_reinsurance_risk_level(
inuring_priority,
items,
coverages,
fm_xrefs,
xref_descriptions,
gulsummaryxref,
fmsummaryxref,
ri_info_df,
ri_scope_df,
previous_inuring_priority,
previous_risk_level,
risk_level,
reinsurance_index,
direct_oasis_files_dir):
"""
Generate files for a reinsurance risk level.
"""
reins_numbers_1 = ri_info_df[
ri_info_df['InuringPriority'] == inuring_priority].ReinsNumber
if reins_numbers_1.empty:
return None
reins_numbers_2 = ri_scope_df[
ri_scope_df.isin({"ReinsNumber": reins_numbers_1.tolist()}).ReinsNumber
& (ri_scope_df.RiskLevel == risk_level)].ReinsNumber
if reins_numbers_2.empty:
return None
ri_info_inuring_priority_df = ri_info_df[ri_info_df.isin(
{"ReinsNumber": reins_numbers_2.tolist()}).ReinsNumber]
output_name = "ri_{}_{}".format(inuring_priority, risk_level)
reinsurance_layer = ReinsuranceLayer(
name=output_name,
ri_info=ri_info_inuring_priority_df,
ri_scope=ri_scope_df,
items=items,
coverages=coverages,
fm_xrefs=fm_xrefs,
xref_descriptions=xref_descriptions,
gulsummaryxref=gulsummaryxref,
fmsummaryxref=fmsummaryxref,
risk_level=risk_level
)
reinsurance_layer.generate_oasis_structures()
output_dir = os.path.join(direct_oasis_files_dir, "RI_{}".format(reinsurance_index))
reinsurance_layer.write_oasis_files(output_dir)
return output_dir
@oasis_log
def write_ri_input_files(
exposure_fp,
accounts_fp,
items_fp,
coverages_fp,
gulsummaryxref_fp,
fm_xref_fp,
fmsummaryxref_fp,
ri_info_fp,
ri_scope_fp,
target_dir
):
xref_descriptions = pd.DataFrame(generate_xref_descriptions(accounts_fp, exposure_fp))
return generate_files_for_reinsurance(
pd.read_csv(items_fp),
pd.read_csv(coverages_fp),
pd.read_csv(fm_xref_fp),
xref_descriptions,
pd.read_csv(ri_info_fp),
pd.read_csv(ri_scope_fp),
target_dir,
gulsummaryxref=pd.read_csv(gulsummaryxref_fp),
fmsummaryxref=pd.read_csv(fmsummaryxref_fp)
)
class ReinsuranceLayer(object):
"""
Generates ktools inputs and runs the financial module for a reinsurance structure.
"""
def __init__(self,
name, ri_info, ri_scope, items, coverages, fm_xrefs,
xref_descriptions, risk_level, fmsummaryxref=pd.DataFrame(), gulsummaryxref=pd.DataFrame(), logger=None):
self.logger = logger or logging.getLogger()
self.name = name
self.coverages = coverages
self.items = items
self.fm_xrefs = fm_xrefs
self.xref_descriptions = xref_descriptions
self.fmsummaryxref = fmsummaryxref
self.gulsummaryxref = gulsummaryxref
self.item_ids = list()
self.item_tivs = list()
self.fmprogrammes = pd.DataFrame()
self.fmprofiles = pd.DataFrame()
self.fm_policytcs = pd.DataFrame()
self.risk_level = risk_level
self.ri_info = ri_info
self.ri_scope = ri_scope
self.add_profiles_args = namedtuple(
"AddProfilesArgs",
"program_node, ri_info_row, scope_rows, overlay_loop, layer_id, "
"node_layer_profile_map, fmprofiles_list, nolossprofile_id, passthroughprofile_id")
def _add_node(self, description, parent, level_id, agg_id,
portfolio_number=oed.NOT_SET_ID, account_number=oed.NOT_SET_ID,
policy_number=oed.NOT_SET_ID, location_number=oed.NOT_SET_ID,
location_group=oed.NOT_SET_ID):
node = anytree.Node(
description,
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=portfolio_number,
account_number=account_number,
policy_number=policy_number,
location_group=location_group,
location_number=location_number)
return node
def _add_program_node(self, level_id):
return self._add_node(
"Treaty",
parent=None,
level_id=level_id,
agg_id=1)
def _add_item_node(self, xref_id, parent):
return self._add_node(
"Item_id:{}".format(xref_id),
parent=parent,
level_id=1,
agg_id=xref_id)
def _add_location_node(
self, level_id, agg_id, xref_description, parent):
return self._add_node(
"Portfolio_number:{} Account_number:{} Policy_number:{} Location_number:{}".format(
xref_description.portfolio_number,
xref_description.account_number,
xref_description.policy_number,
xref_description.location_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number,
account_number=xref_description.account_number,
policy_number=xref_description.policy_number,
location_group=xref_description.location_group,
location_number=xref_description.location_number)
def _add_location_group_node(
self, level_id, agg_id, xref_description, parent):
return self._add_node(
"Location_group:{}".format(xref_description.location_group),
parent=parent,
level_id=level_id,
agg_id=agg_id,
location_group=xref_description.location_group)
def _add_policy_node(
self, level_id, agg_id, xref_description, parent):
return self._add_node(
"Portfolio number:{} Account_number:{} Policy_number:{}".format(
xref_description.portfolio_number, xref_description.account_number, xref_description.policy_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number,
account_number=xref_description.account_number,
policy_number=xref_description.policy_number)
def _add_account_node(
self, agg_id, level_id, xref_description, parent):
return self._add_node(
"Portfolio number:{} Account_number:{}".format(
xref_description.portfolio_number, xref_description.account_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number,
account_number=xref_description.account_number)
def _add_portfolio_node(
self, agg_id, level_id, xref_description, parent):
return self._add_node(
"Portfolio number:{}".format(xref_description.portfolio_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number)
def _is_valid_id(self, id_to_check):
is_valid = self._is_defined(id_to_check) and \
((isinstance(id_to_check, string_types) and id_to_check != "")
or
(isinstance(id_to_check, numbers.Number) and id_to_check > 0))
return is_valid
def _match_portfolio(self, node, scope_row, exact=False):
if self._is_valid_id(scope_row.PortNumber):
return node.portfolio_number == scope_row.PortNumber
else:
return True
def _match_account(self, node, scope_row, exact=False):
match = False
if exact:
match = self._match_portfolio(node, scope_row) and node.account_number == scope_row.AccNumber
else:
if (self._is_valid_id(scope_row.PortNumber) and self._is_valid_id(scope_row.AccNumber)):
match = self._match_portfolio(node, scope_row) and node.account_number == scope_row.AccNumber
else:
match = self._match_portfolio(node, scope_row)
return match
def _match_policy(self, node, scope_row, exact=False):
match = False
if exact:
match = self._match_account(node, scope_row) and node.policy_number == scope_row.PolNumber
else:
if (self._is_valid_id(scope_row.PolNumber) and self._is_valid_id(scope_row.AccNumber) and self._is_valid_id(scope_row.PortNumber)):
match = self._match_account(node, scope_row) and node.policy_number == scope_row.PolNumber
else:
match = self._match_account(node, scope_row)
return match
def _match_location(self, node, scope_row, exact=False):
match = False
if self._is_valid_id(scope_row.PolNumber):
if exact:
match = self._match_policy(node, scope_row) and node.location_number == scope_row.LocNumber
else:
if self._is_valid_id(scope_row.LocNumber) and self._is_valid_id(scope_row.AccNumber) and self._is_valid_id(scope_row.PortNumber):
match = self._match_policy(node, scope_row) and node.location_number == scope_row.LocNumber
else:
match = self._match_policy(node, scope_row)
else:
if exact:
match = self._match_account(node, scope_row) and node.location_number == scope_row.LocNumber
else:
if self._is_valid_id(scope_row.LocNumber) and self._is_valid_id(scope_row.AccNumber) and self._is_valid_id(scope_row.PortNumber):
match = self._match_account(node, scope_row) and node.location_number == scope_row.LocNumber
else:
match = self._match_account(node, scope_row)
return match
def _match_location_group(self, node, scope_row, exact=False):
match = False
if self._is_valid_id(scope_row.LocGroup):
match = node.location_group == scope_row.LocGroup
return match
def _is_valid_filter(self, value):
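# value == value is False only for NaN, so this also rejects missing values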
return (value is not None and value != "" and value == value)
def _match_row(self, node, scope_row):
match = True
if match and self._is_valid_filter(scope_row.PortNumber):
match = node.portfolio_number == scope_row.PortNumber
if match and self._is_valid_filter(scope_row.AccNumber):
match = node.account_number == scope_row.AccNumber
if match and self._is_valid_filter(scope_row.PolNumber):
match = node.policy_number == scope_row.PolNumber
if match and self._is_valid_filter(scope_row.LocGroup):
match = node.location_group == scope_row.LocGroup
if match and self._is_valid_filter(scope_row.LocNumber):
match = node.location_number == scope_row.LocNumber
# if match and self._is_valid_filter(scope_row.CedantName):
# if match and self._is_valid_filter(scope_row.ProducerName):
# if match and self._is_valid_filter(scope_row.LOB):
# if match and self._is_valid_filter(scope_row.CountryCode):
# if match and self._is_valid_filter(scope_row.ReinsTag):
return match
def _scope_filter(self, nodes_list, scope_row, exact=False):
"""
Return subset of `nodes_list` based on values of a row in `ri_scope.csv`
"""
filtered_nodes_list = list(filter(
lambda n: self._match_row(n, scope_row),
nodes_list))
return filtered_nodes_list
def _risk_level_filter(self, nodes_list, scope_row, exact=False):
"""
Return subset of `nodes_list` based on values of a row in `ri_scope.csv`
"""
if (scope_row.RiskLevel == oed.REINS_RISK_LEVEL_PORTFOLIO):
return list(filter(
lambda n: self._match_portfolio(n, scope_row, exact),
nodes_list))
elif (scope_row.RiskLevel == oed.REINS_RISK_LEVEL_ACCOUNT):
return list(filter(
lambda n: self._match_account(n, scope_row, exact),
nodes_list))
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_POLICY:
nodes_list = list(filter(
lambda n: self._match_policy(n, scope_row, exact),
nodes_list))
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION:
nodes_list = list(filter(
lambda n: self._match_location(n, scope_row, exact),
nodes_list))
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
nodes_list = list(filter(
lambda n: self._match_location_group(n, scope_row, exact),
nodes_list))
else:
raise OasisException("Unknown risk level: {}".format(scope_row.RiskLevel))
return nodes_list
def _is_defined(self, num_to_check):
# If the value = NaN it will return False
return num_to_check == num_to_check
def _check_scope_row(self, scope_row):
# For some treaty types the scope filter must match exactly
okay = True
if (scope_row.RiskLevel == oed.REINS_RISK_LEVEL_ACCOUNT):
okay = \
self._is_valid_id(scope_row.AccNumber) and \
not self._is_valid_id(scope_row.PolNumber) and \
not self._is_valid_id(scope_row.LocNumber)
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_POLICY:
okay = \
self._is_valid_id(scope_row.AccNumber) and \
self._is_valid_id(scope_row.PolNumber) and \
not self._is_valid_id(scope_row.LocNumber)
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION:
okay = \
self._is_valid_id(scope_row.AccNumber) and \
self._is_valid_id(scope_row.LocNumber)
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
okay = \
self._is_valid_id(scope_row.LocGroup)
return okay
LOCATION_RISK_LEVEL = 2
def _get_tree(self):
current_location_number = 0
current_policy_number = 0
current_account_number = 0
current_portfolio_number = 0
current_location_group = 0
current_location_node = None
current_node = None
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
risk_level_id = self.LOCATION_RISK_LEVEL
else:
risk_level_id = self.LOCATION_RISK_LEVEL + 1
program_node_level_id = risk_level_id + 1
program_node = self._add_program_node(program_node_level_id)
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
xref_descriptions = self.xref_descriptions.sort_values(
by=["location_group", "portfolio_number", "account_number", "policy_number", "location_number"])
else:
xref_descriptions = self.xref_descriptions.sort_values(
by=["portfolio_number", "account_number", "policy_number", "location_number"])
agg_id = 0
loc_agg_id = 0
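# agg_id numbers the portfolio/account/policy/location-group nodes at the risk level; loc_agg_id numbers the location nodes (which are themselves the risk level when RiskLevel is location)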
for row in xref_descriptions.itertuples():
if self.risk_level == oed.REINS_RISK_LEVEL_PORTFOLIO:
if current_portfolio_number != row.portfolio_number:
agg_id = agg_id + 1
current_node = self._add_portfolio_node(
agg_id, risk_level_id, row, program_node)
elif self.risk_level == oed.REINS_RISK_LEVEL_ACCOUNT:
if \
current_portfolio_number != row.portfolio_number or \
current_account_number != row.account_number:
agg_id = agg_id + 1
current_node = self._add_account_node(
agg_id, risk_level_id, row, program_node)
elif self.risk_level == oed.REINS_RISK_LEVEL_POLICY:
if \
current_portfolio_number != row.portfolio_number or \
current_account_number != row.account_number or \
current_policy_number != row.policy_number:
agg_id = agg_id + 1
current_node = self._add_policy_node(
risk_level_id, agg_id, row, program_node)
elif self.risk_level == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
if current_location_group != row.location_group:
agg_id = agg_id + 1
current_node = self._add_location_group_node(
risk_level_id, agg_id, row, program_node)
if \
current_portfolio_number != row.portfolio_number or \
current_account_number != row.account_number or \
current_policy_number != row.policy_number or \
current_location_number != row.location_number:
loc_agg_id = loc_agg_id + 1
level_id = 2
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
current_location_node = self._add_location_node(
level_id, loc_agg_id, row, program_node)
else:
current_location_node = self._add_location_node(
level_id, loc_agg_id, row, current_node)
current_portfolio_number = row.portfolio_number
current_account_number = row.account_number
current_policy_number = row.policy_number
current_location_number = row.location_number
current_location_group = row.location_group
self._add_item_node(row.xref_id, current_location_node)
return program_node
def _get_risk_level_id(self):
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
risk_level_id = 2
else:
risk_level_id = 3
return risk_level_id
def _get_filter_level_id(self):
risk_level_id = 2
return risk_level_id
def _get_next_profile_id(self, add_profiles_args):
profile_id = max(
x.profile_id for x in add_profiles_args.fmprofiles_list)
return profile_id + 1
def _add_fac_profiles(self, add_profiles_args):
self.logger.debug("Adding FAC profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
add_profiles_args.fmprofiles_list.append(oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.RiskAttachment,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=add_profiles_args.ri_info_row.CededPercent,
placement=add_profiles_args.ri_info_row.PlacedPercent
))
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
for node in nodes_filter_level_all:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
# Note that FAC profiles scope must match the filter exactly.
if not self._check_scope_row(ri_scope_row):
raise OasisException("Invalid scope row: {}".format(ri_scope_row))
nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=True)
for node in nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_per_risk_profiles(self, add_profiles_args):
self.logger.debug("Adding PR profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
add_profiles_args.fmprofiles_list.append(oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.RiskAttachment,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=add_profiles_args.ri_info_row.CededPercent,
))
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
selected_nodes = self._scope_filter(nodes_filter_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
# add OccLimit / Placed Percent
profile_id = profile_id + 1
add_profiles_args.fmprofiles_list.append(
oed.get_occlim_profile(
profile_id,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_surplus_share_profiles(self, add_profiles_args):
self.logger.debug("Adding SS profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
for node in nodes_filter_level_all:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
# Note that surplus share profiles scope must match the filter exactly.
if not self._check_scope_row(ri_scope_row):
raise OasisException("Invalid scope row: {}".format(ri_scope_row))
add_profiles_args.fmprofiles_list.append(oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.RiskAttachment,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=ri_scope_row.CededPercent,
))
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=True)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
profile_id = profile_id + 1
# add OccLimit / Placed Percent
add_profiles_args.fmprofiles_list.append(
oed.get_occlim_profile(
profile_id,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_quota_share_profiles(self, add_profiles_args):
self.logger.debug("Adding QS profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
add_profiles_args.fmprofiles_list.append(
oed.get_reinsurance_profile(
profile_id,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=add_profiles_args.ri_info_row.CededPercent,
))
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
# Filter
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
selected_nodes = self._scope_filter(nodes_filter_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
# add OccLimit / Placed Percent
profile_id = profile_id + 1
add_profiles_args.fmprofiles_list.append(
oed.get_occlim_profile(
profile_id,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_cat_xl_profiles(self, add_profiles_args):
self.logger.debug("Adding CAT XL profiles")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
# Filter
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
selected_nodes = self._scope_filter(nodes_filter_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
# Add OccLimit / Placed Percent
add_profiles_args.fmprofiles_list.append(
oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.OccAttachment,
ceded=add_profiles_args.ri_info_row.CededPercent,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _log_reinsurance_structure(self, add_profiles_args):
if self.logger:
self.logger.debug('policytc_map: "{}"'.format(self.name))
policytc_map = dict()
for k in add_profiles_args.node_layer_profile_map.keys():
profile_id = add_profiles_args.node_layer_profile_map[k]
policytc_map["(Name=%s, layer_id=%s, overlay_loop=%s)" % k] = profile_id
self.logger.debug(json.dumps(policytc_map, indent=4))
self.logger.debug('fm_policytcs: "{}"'.format(self.name))
self.logger.debug(self.fm_policytcs)
self.logger.debug('fm_profile: "{}"'.format(self.name))
self.logger.debug(self.fmprofiles)
self.logger.debug('fm_programme: "{}"'.format(self.name))
self.logger.debug(self.fmprogrammes)
def _log_tree(self, program_node):
if self.logger:
self.logger.debug('program_node tree: "{}"'.format(self.name))
self.logger.debug(anytree.RenderTree(program_node))
def generate_oasis_structures(self):
'''
Create the Oasis structures - FM Programmes, FM Profiles and FM Policy TCs -
that represent the reinsurance structure.
        The algorithm to create the structure has three steps:
        Step 1 - Build a tree representation of the insurance program, depending on the reinsurance risk level.
        Step 2 - Overlay the reinsurance structure. Each reinsurance contract is a separate layer.
Step 3 - Iterate over the tree and write out the Oasis structure.
'''
fmprogrammes_list = list()
fmprofiles_list = list()
fm_policytcs_list = list()
profile_id = 1
nolossprofile_id = profile_id
fmprofiles_list.append(
oed.get_no_loss_profile(nolossprofile_id))
profile_id = profile_id + 1
passthroughprofile_id = profile_id
fmprofiles_list.append(
oed.get_pass_through_profile(passthroughprofile_id))
node_layer_profile_map = {}
self.logger.debug(fmprofiles_list)
#
        # Step 1 - Build a tree representation of the insurance program, depending on the reinsurance risk level.
#
program_node = self._get_tree()
self._log_tree(program_node)
#
        # Step 2 - Overlay the reinsurance structure. Each reinsurance contract is a separate layer.
#
layer_id = 1 # Current layer ID
overlay_loop = 0 # Overlays multiple rules in same layer
prev_reins_number = 1
for _, ri_info_row in self.ri_info.iterrows():
overlay_loop += 1
scope_rows = self.ri_scope[
(self.ri_scope.ReinsNumber == ri_info_row.ReinsNumber)
& (self.ri_scope.RiskLevel == self.risk_level)]
# If FAC, don't increment the layer number
# Else, only increment inline with the reins_number
if ri_info_row.ReinsType in ['FAC']:
pass
elif prev_reins_number < ri_info_row.ReinsNumber:
layer_id += 1
prev_reins_number = ri_info_row.ReinsNumber
if self.logger:
pd.set_option('display.width', 1000)
self.logger.debug('ri_scope: "{}"'.format(self.name))
self.logger.debug(scope_rows)
if scope_rows.shape[0] == 0:
continue
add_profiles_args = self.add_profiles_args(
program_node, ri_info_row, scope_rows, overlay_loop, layer_id,
node_layer_profile_map, fmprofiles_list,
nolossprofile_id, passthroughprofile_id)
# Add pass through nodes at all levels so that the risks not explicitly covered are unaffected
for node in anytree.iterators.LevelOrderIter(add_profiles_args.program_node):
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.nolossprofile_id
else:
if node.level_id == self._get_risk_level_id():
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.nolossprofile_id
elif node.level_id == self._get_filter_level_id():
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.nolossprofile_id
else:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
add_profiles_args.node_layer_profile_map[(
add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
if ri_info_row.ReinsType == oed.REINS_TYPE_FAC:
self._add_fac_profiles(add_profiles_args)
elif ri_info_row.ReinsType == oed.REINS_TYPE_PER_RISK:
self._add_per_risk_profiles(add_profiles_args)
elif ri_info_row.ReinsType == oed.REINS_TYPE_QUOTA_SHARE:
self._add_quota_share_profiles(add_profiles_args)
elif ri_info_row.ReinsType == oed.REINS_TYPE_SURPLUS_SHARE:
self._add_surplus_share_profiles(add_profiles_args)
elif ri_info_row.ReinsType == oed.REINS_TYPE_CAT_XL:
self._add_cat_xl_profiles(add_profiles_args)
else:
raise Exception("ReinsType not supported yet: {}".format(
ri_info_row.ReinsType))
#
# Step 3 - Iterate over the tree and write out the Oasis structure.
#
for node in anytree.iterators.LevelOrderIter(program_node):
if node.parent is not None:
fmprogrammes_list.append(
oed.FmProgramme(
from_agg_id=node.agg_id,
level_id=node.level_id,
to_agg_id=node.parent.agg_id
)
)
for layer in range(1, layer_id + 1):
for node in anytree.iterators.LevelOrderIter(program_node):
if node.level_id > 1:
profiles_ids = []
# Collect over-lapping unique combinations of (layer_id, level_id, agg_id)
# and combine into a single layer
for overlay_rule in range(1, overlay_loop + 1):
try:
profiles_ids.append(
node_layer_profile_map[(node.name, layer, overlay_rule)])
                        except KeyError:
                            # no profile assigned for this (node, layer, overlay rule): fall back to the no-loss profile
                            profiles_ids.append(1)
fm_policytcs_list.append(oed.FmPolicyTc(
layer_id=layer,
level_id=node.level_id - 1,
agg_id=node.agg_id,
profile_id=max(profiles_ids)
))
self.fmprogrammes = pd.DataFrame(fmprogrammes_list)
self.fmprofiles = | pd.DataFrame(fmprofiles_list) | pandas.DataFrame |
import os
import pandas as pd
import json
import re
import gc
from configparser import ConfigParser
from pathlib import Path
from typing import List, Dict, Union, Text, Tuple, Iterable
from numbers import Number
from pathlib import Path
from selenium.webdriver import Firefox
from selenium.webdriver import firefox
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import NoSuchElementException
from scripts.crawler import AcordaosTCU
import sqlite3
firefox_webelements = firefox.webelement.FirefoxWebElement
firefox_webdriver = firefox.webdriver.WebDriver
def parse_json_year_date(year: Number, fullpath: Path) -> Union[Path, None]:
"""
    Filter the JSON files by year.
"""
if not isinstance(fullpath, Path):
raise TypeError("O parâmetro path deve do tipo Path.")
pattern_finder = re.search(f"_{year}\.json", fullpath.name)
if pattern_finder:
return fullpath
else:
return None
def load_into_dataframe(jsonFile: List[Dict]) -> pd.DataFrame:
"""
    Create a DataFrame from a list of dictionaries (JSON).
    """
    # container to store the loaded json files
container_of_json = []
for file in jsonFile:
with open(file, "r", encoding="utf8") as f:
d = json.load(f)
container_of_json.append(d)
# container of dataframes
container_of_dataframes = []
for data in container_of_json:
df = pd.read_json(json.dumps(data), orient="records", encoding="utf8")
container_of_dataframes.append(df)
df = pd.concat(container_of_dataframes)
return df
def get_urn(pattern: str, df: pd.DataFrame) -> Dict:
"""
    Receive a URN pattern and collect all of its occurrences in the dataframe.
"""
urn_container = {}
for index, row in df.iterrows():
if type(row["urn"]) == list:
for data in row["urn"]:
if pattern in data:
if pattern in urn_container:
continue
else:
urn_container[row["urn"]] = row["url"]
else:
if pattern in row["urn"]:
if pattern in urn_container:
continue
else:
urn_container[row["urn"]] = row["url"]
return urn_container
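# Hedged usage sketch (added for illustration, not part of the original crawler):
# shows how get_urn scans a small DataFrame for a URN substring. The two rows
# below are invented stand-ins for the crawled data.
def _example_get_urn():
    sample = pd.DataFrame({
        "urn": ["urn:lex:br:tribunal.contas.uniao;plenario:acordao:2019;100",
                "urn:lex:br:outro.orgao:decisao:2019;7"],
        "url": ["https://example.org/acordao/100", "https://example.org/decisao/7"],
    })
    # returns {"urn:lex:br:tribunal.contas.uniao;...": "https://example.org/acordao/100"}
    return get_urn("tribunal.contas.uniao", sample)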
def select_files_based_on_year(path: Path, year: str) -> List[Path]:
"""
    Select the files whose names contain the given year.
"""
if not isinstance(path, Path):
raise TypeError("O parâmetro path deve do tipo Path.")
container_of_json_year = []
path_to_str = str(path.absolute())
for dirname, _, filenames in os.walk(path_to_str):
for filename in filenames:
path_filename = Path(os.path.join(dirname, filename))
check_pattern = parse_json_year_date(year, path_filename)
if check_pattern:
container_of_json_year.append(path_filename)
return container_of_json_year
def pipeline_to_get_urn(
path: Path, years: List[str], patterns: List[str]
) -> (List[Dict], List[int]):
"""
    Pipeline to collect the URNs matching the given patterns across several files.
    Arguments:
        path: directory containing the json files
        years: list of years for which data should be collected
        patterns: URN substrings to search for
"""
if not isinstance(path, Path):
raise TypeError("O parâmetro path deve do tipo Path.")
container = []
if not isinstance(years, List):
raise TypeError("O parâmetro years precisa ser uma lista.")
if not isinstance(patterns, List):
raise TypeError("O parâmetro patterns precisa ser uma lista.")
#criar container para armazenar os anos que possuem dados
filtered_years = []
for year in years:
container_of_json_year = select_files_based_on_year(path, year)
if not container_of_json_year:
print(f"Não há dados relativos ao {path} e {year}.")
continue
# sort by filename
container_of_json_year = sorted(
container_of_json_year, key=lambda x: int(x.name.split("_")[0])
)
        # load the data
df = load_into_dataframe(container_of_json_year)
for pattern in patterns:
print(
f"Iniciando a coleta das urn para o padrão {pattern} na base anual {year}."
)
urn_list = get_urn(pattern, df)
container.append(urn_list)
del urn_list
filtered_years.append(year)
del df
gc.collect()
return container, filtered_years
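# Hedged usage sketch: the directory, year and pattern below are assumptions;
# pipeline_to_get_urn expects a folder of "<n>_<year>.json" crawl dumps.
def _example_pipeline_to_get_urn():
    json_dir = Path("./data/json")  # hypothetical location of the crawled files
    urn_dicts, years_with_data = pipeline_to_get_urn(json_dir, ["2019"], ["tribunal.contas.uniao"])
    return urn_dicts, years_with_data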
def create_df_for_urn_data_and_save(data: Dict, filename: str) -> None:
x = pd.DataFrame.from_dict(data, orient="index")
x.reset_index(inplace=True)
x.columns = ["urn", "url"]
x = x[["urn", "url"]]
path_to_save = Path(f"./data/")
path_to_save.mkdir(parents=True, exist_ok=True)
path_to_save = path_to_save / f"{filename}.csv"
x.to_csv(path_to_save, encoding="utf8", index=False)
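# Hedged usage sketch: writes a tiny invented URN->URL mapping to ./data/example.csv.
def _example_create_df_for_urn_data_and_save():
    data = {"urn:lex:br:exemplo:acordao:2019;1": "https://example.org/acordao/1"}
    create_df_for_urn_data_and_save(data, "example")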
def initiate_webdriver() -> firefox_webdriver:
config = ConfigParser()
config.read("config.ini")
driver = config["driver"]["driver"]
options = Options()
options.headless = True
path_to_save_logs = Path(config["driver"]["driver_logs"])
if not path_to_save_logs.parent.is_dir():
        path_to_save_logs.parent.mkdir(parents=True, exist_ok=True)
browser = Firefox(
executable_path=driver, service_log_path=path_to_save_logs, options=options
)
return browser
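# Hedged sketch of the config.ini layout that initiate_webdriver appears to expect;
# the section/key names are the ones read in the code above, the paths are assumptions:
#
#   [driver]
#   driver = /usr/local/bin/geckodriver
#   driver_logs = logs/geckodriver.log
#
def _example_initiate_webdriver():
    browser = initiate_webdriver()
    try:
        browser.get("https://example.org")
    finally:
        browser.quit()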
def load_csv_into_db(years: List[int], cursor: sqlite3.Cursor) -> None:
for year in years:
df = | pd.read_csv(f"./data/tcu_{year}.csv", sep=",", encoding="utf8") | pandas.read_csv |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
import pandas as pd
import pytest
import mars.oscar as mo
from mars.serialization import AioDeserializer, AioSerializer
from mars.services import start_services, stop_services, NodeRole
from mars.services.storage import StorageAPI
from mars.storage import StorageLevel
@pytest.fixture
async def actor_pools():
async def start_pool():
start_method = os.environ.get('POOL_START_METHOD', 'forkserver') \
if sys.platform != 'win32' else None
pool = await mo.create_actor_pool('127.0.0.1', n_process=2,
subprocess_start_method=start_method,
labels=['main', 'sub', 'io'])
await pool.start()
return pool
worker_pool = await start_pool()
yield worker_pool
await worker_pool.stop()
@pytest.mark.asyncio
async def test_storage_service(actor_pools):
worker_pool = actor_pools
if sys.platform == 'darwin':
plasma_dir = '/tmp'
else:
plasma_dir = '/dev/shm'
plasma_setup_params = dict(
store_memory=10 * 1024 * 1024,
plasma_directory=plasma_dir,
check_dir_size=False)
config = {
"services": ["storage"],
"storage": {
"backends": ["plasma"],
"plasma": plasma_setup_params,
}
}
await start_services(
NodeRole.WORKER, config, address=worker_pool.external_address)
api = await StorageAPI.create('mock_session', worker_pool.external_address)
value1 = np.random.rand(10, 10)
await api.put('data1', value1)
get_value1 = await api.get('data1')
np.testing.assert_array_equal(value1, get_value1)
# test api in subpool
subpool_address = list(worker_pool._sub_processes.keys())[0]
api2 = await StorageAPI.create('mock_session', subpool_address)
assert api2._storage_handler_ref.address == subpool_address
get_value1 = await api2.get('data1')
np.testing.assert_array_equal(value1, get_value1)
sliced_value = await api2.get('data1', conditions=[slice(None, None), slice(0, 4)])
np.testing.assert_array_equal(value1[:, :4], sliced_value)
value2 = pd.DataFrame(value1)
await api2.put('data2', value2)
get_value2 = await api.get('data2')
| pd.testing.assert_frame_equal(value2, get_value2) | pandas.testing.assert_frame_equal |
"""
.. module:: utilities
:platform: Windows
:synopsis: Implementation of various functions that ease the work,
but do not belong in one of the other modules.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import chi2
c = 299792458.0
h = 6.62606957 * (10 ** -34)
q = 1.60217657 * (10 ** -19)
cmap = mpl.colors.ListedColormap(['#A6CEE3', '#1F78B4', '#B2DF8A'])
invcmap = mpl.colors.ListedColormap(['#B2DF8A', '#1F78B4', '#A6CEE3'])
# __all__ = ['ReleaseCurve',
# 'Level',
# 'Energy',
# 'round_to_signif_figs',
# 'weighted_average',
# 'bootstrap_ci',
# 'generate_likelihood_plot',
# 'generate_correlation_plot',
# 'generate_spectrum',
# 'concat_results',
# 'poisson_interval']
def state_number_enumerate(dims, state=None, idx=0):
"""Create the indices for the different entries in
a multi-dimensional array. Code copied from the QuTiP package.
Parameters
----------
    dims: tuple
Describes the shape of the multi-dimensional array.
Returns
-------
tuple
Tuple with each entry being a tuple containing the indices."""
if state is None:
state = np.zeros(len(dims))
if idx == len(dims):
yield tuple(state)
else:
for n in range(dims[idx]):
state[idx] = n
for s in state_number_enumerate(dims, state, idx + 1):
yield s
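# Hedged usage sketch: list all index tuples of a 2x3 array. Because the running
# state is kept in a float array, the yielded tuples contain floats:
# [(0.0, 0.0), (0.0, 1.0), (0.0, 2.0), (1.0, 0.0), (1.0, 1.0), (1.0, 2.0)]
def _example_state_number_enumerate():
    return list(state_number_enumerate((2, 3)))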
# Create a vectorized function for calling an array of callables,
# mixed with non-callables.
def ifCallableCall(ob, arg):
return ob(arg) if callable(ob) else ob
vFifCallableCall = np.vectorize(ifCallableCall, otypes=[float])
def callNDArray(arr, arg):
"""Goes over each subarray in the first dimension,
and calls the corresponding argument. Returns the values itself
if the entry is not callable.
Parameters
----------
arr: NumPy array
Array containing a mix of callable and not-callable entries.
arg: misc
Argument to be passed to each callable entry.
Returns
-------
NumPy array
Array with the callable entries replaced by the returned value."""
n = arr.shape[0]
assert n == len(arg)
res = np.zeros(arr.shape)
for i in range(n):
# Go for the vectorized function. In case of problems,
# comment the following line and use the try-except
# block. That is proven to work, but is slower.
res[i] = vFifCallableCall(arr[i], arg[i])
# try:
# res[i] = np.array(
# [[x(arg[i]) if callable(x) else x for x in xarr]
# for xarr in arr[i]])
# except ValueError:
# raise ValueError()
return res
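# Hedged usage sketch: a 2x1x1 object array whose first sub-array holds a callable
# and whose second holds a plain number; each sub-array is evaluated at its own argument.
def _example_callNDArray():
    arr = np.empty((2, 1, 1), dtype=object)
    arr[0, 0, 0] = lambda x: 2 * x
    arr[1, 0, 0] = 5.0
    return callNDArray(arr, np.array([3.0, 1.0]))  # -> [[[6.]], [[5.]]]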
class ReleaseCurve(object):
r"""Creates a callable object for the standard release curve. Formula
based on <NAME> et al. :cite:`Ramos2014`. Input parameters are
initialized to an 35Ar release curve.
Parameters
----------
amp : float,
Influences the height of the curve, roughly the maximum of the
release rate. Is also an attribute. Default: 4E7
a : float between 0 and 1
Weighting of the different exponentials in the formula. Is also an
attribute. Default: 0.9
tr : float
Time constant parameter in seconds. The attribute is saved as the
corresponding l-parameter. Default: 78 ms
tf : float
Time constant parameter in seconds. The attribute is saved as the
corresponding l-parameter. Default: 396 ms
ts : float
Time constant parameter in seconds. The attribute is saved as the
corresponding l-parameter. Default: 1905 ms
pulses : integer
Number of pulses seperated by the delay parameter. Has no effect if the
:attr:`continued` parameter is True. Is also an attribute. Default: 3
delay : float
Seconds between pulses. Is also an attribute. Default: 10.0 s
continued : bool
Continuously generate pulses seperated by the delay parameter if True,
else create the number of pulses given in the pulses parameter. Is also
an attribute. Default: True
Note
----
The l-parameters are related to the t-parameters through
:math:`l = \frac{\ln(2)}{t}`. The release curve is modeled as:
.. math::
RC\left(t\right) = a\left(1-\exp\left(-l_rt\right)\right)
\left(a\exp\left(-l_ft\right)+(1-a)\exp\left(-l_st\right)\right)"""
def __init__(self, amp=4.0 * 10 ** 7, a=0.9,
tr=78 * (10 ** -3), tf=396 * (10 ** -3),
ts=1905 * (10 ** -3),
pulses=3, delay=10.0, continued=True):
super(ReleaseCurve, self).__init__()
self.amp = amp
self.a = a
self.lr = np.log(2) / tr
self.lf = np.log(2) / tf
self.ls = np.log(2) / ts
self.pulses = pulses
self.delay = delay
self.continued = continued
def fit_to_data(self, t, y, yerr):
"""If a release curve is measured as a function of time, this should
fit the parameters to the given curve y(t) with errors yerr.
Parameters
----------
t: array_like
Timevector of the measurements.
y: array_like
Counts corresponding to t.
yerr: array_like
Counting errors of y.
Warning
-------
This method has not been tested!"""
import lmfit as lm
params = lm.Parameters()
params.add_many(
('Amp', self.amp, True, 0, None, None),
('a', self.a, True, 0, 1, None, None),
('tr', np.log(2) / self.lr, True, None, None, None),
('tf', np.log(2) / self.lf, True, None, None, None),
('ts', np.log(2) / self.ls, True, None, None, None))
def resid(params):
self.amp = params['Amp']
self.a = params['a']
self.lr = np.log(2) / params['tr']
self.lf = np.log(2) / params['tf']
self.ls = np.log(2) / params['ts']
return (y - self.empirical_formula(t)) / yerr
return lm.minimize(resid, params)
@property
def pulses(self):
return self._pulses
@pulses.setter
def pulses(self, value):
self._pulses = int(value)
@property
def continued(self):
return self._continued
@continued.setter
def continued(self, value):
self._continued = (value == 1)
def empirical_formula(self, t):
amp = self.amp
a = self.a
lr = self.lr
lf = self.lf
ls = self.ls
val = amp * (1 - np.exp(-lr * t)) * (a * np.exp(-lf * t) +
(1 - a) * np.exp(-ls * t))
return val
def __call__(self, t):
"""Return the evaluation of the formula, taking the pulses
and delays into account.
Parameters
----------
t: array_like
Times for which the yield is requested."""
pulses = self.pulses
delay = self.delay
continued = self.continued
pulses = np.arange(1.0, pulses) * delay
rc = self.empirical_formula(t)
if not continued:
for pulsetime in pulses:
mask = t > pulsetime
try:
if any(mask):
rc[mask] += self.empirical_formula(t[mask] - pulsetime)
except TypeError:
if mask:
rc += self.empirical_formula(t - pulsetime)
else:
pulsetime = delay
try:
number = (t // pulsetime).astype('int')
for pulses in range(1, max(number) + 1):
mask = (number >= pulses)
rc[mask] += self.empirical_formula(t[mask] -
pulses * pulsetime)
except AttributeError:
number = int(t // pulsetime)
if number > 0:
for i in range(number):
rc += self.empirical_formula(t - (i + 1) * pulsetime)
return rc
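# Hedged usage sketch: evaluate the default (35Ar-like) release curve on a short
# time grid; the grid itself is arbitrary and only meant for illustration.
def _example_release_curve():
    rc = ReleaseCurve()              # default parameters, continued pulsing every 10 s
    t = np.linspace(0.0, 30.0, 301)  # seconds
    return rc(t)                     # release rate at each time point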
class Level(object):
"""Ease-of-use class for representing a level.
Parameters
----------
energy : float
Fine structure energy in eV.
hyp_par : list of 2 floats
Hyperfine parameters [A, B] in MHz.
L, S, J : integer or half-integers
Spin quantum numbers."""
def __init__(self, energy, hyp_par, L, S, J):
super(Level, self).__init__()
self.energy = energy
self.A, self.B = hyp_par
self.L = L
self.S = S
self.J = J
def __str__(self):
s = '<Level object: E=%f, A=%f, B=%f, L=%f, S=%f, J=%f>' % (
self.energy, self.A, self.B, self.L, self.S, self.J)
return s
def invCM2MHz(invCM):
return invCM * 100.0 * c * 10 ** -6
def MHz2invCM(MHz):
return MHz * 10 ** 6 / (100.0 * c)
def invCM2eV(invCM):
return invCM * 100.0 * h * c / q
def eV2invCM(eV):
return eV * q / (100.0 * h * c)
def invCM2nm(invCM):
return ((invCM * 100.0) ** -1) * (10 ** 9)
def nm2invCM(nm):
return ((nm * (10 ** -9)) ** -1) / 100.0
class Energy(object):
"""Ease-of-use class to represent energy and frequencies.
Uses automatic conversion to a series of units.
Parameters
----------
value: float
Value of the energy or frequency to be converted/worked with.
unit: string, {cm-1, MHz, eV, nm}
String denoting the unit for the given value. Default value is inverse
centimeters (cm-1)."""
__units__ = ['cm-1', 'MHz', 'eV', 'nm']
__conversion__ = {'MHz': invCM2MHz,
'eV': invCM2eV,
'nm': invCM2nm}
def __init__(self, value, unit='cm-1'):
super(Energy, self).__init__()
if unit not in self.__units__:
m = '{} is an unknown unit!'.format(unit)
raise TypeError(m)
self.unit = unit
self.value = value
convert = {'MHz': MHz2invCM,
'eV': eV2invCM,
'nm': nm2invCM}
if self.unit in convert.keys():
self.value = convert[self.unit](self.value)
self.unit = 'cm-1'
def __call__(self, unit):
"""Convert the value to the given unit.
Parameters
----------
unit: string
Requested unit, must be 'cm-1', 'MHz', 'eV' or 'nm'.
Returns
-------
float
Converted value."""
if unit in self.__conversion__.keys():
val = self.__conversion__[unit](self.value)
else:
val = self.value
return val
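# Hedged usage sketch: convert a 500 nm transition to other units; internally the
# value is stored in cm-1, so calling with 'cm-1' returns the stored value.
def _example_energy_conversion():
    e = Energy(500, unit='nm')
    return e('cm-1'), e('MHz'), e('eV')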
def round_to_signif_figs(vals, n):
"""
Code copied from
http://stackoverflow.com/questions/18915378/rounding-to-significant-figures-in-numpy
Goes over the list or array of vals given, and rounds
them to the number of significant digits (n) given.
Parameters
----------
vals : array_like
Values to be rounded.
n : integer
Number of significant digits to round to.
Note
----
Does not accept: inf, nan, complex
Example
-------
>>> m = [0.0, -1.2366e22, 1.2544444e-15, 0.001222]
    >>> round_to_signif_figs(m, 2)
array([ 0.00e+00, -1.24e+22, 1.25e-15, 1.22e-03])
"""
if np.all(np.isfinite(vals)) and np.all(np.isreal((vals))):
eset = np.seterr(all='ignore')
mags = 10.0 ** np.floor(np.log10(np.abs(vals))) # omag's
vals = np.around(vals / mags, n - 1) * mags # round(val/omag)*omag
np.seterr(**eset)
vals[np.where(np.isnan(vals))] = 0.0 # 0.0 -> nan -> 0.0
else:
raise IOError('Input must be real and finite')
return vals
def weighted_average(x, sigma, axis=None):
r"""Takes the weighted average of an array of values and the associated
errors. Calculates the scatter and statistical error, and returns
the greater of these two values.
Parameters
----------
x: array_like
Array-like assortment of measured values, is transformed into a
1D-array.
sigma: array_like
Array-like assortment of errors on the measured values, is transformed
into a 1D-array.
Returns
-------
tuple
Returns a tuple (weighted_average, uncertainty), with the uncertainty
being the greater of the uncertainty calculated from the statistical
uncertainty and the scattering uncertainty.
Note
----
The formulas used are
.. math::
\left\langle x\right\rangle_{weighted} &= \frac{\sum_{i=1}^N \frac{x_i}
{\sigma_i^2}}
{\sum_{i=1}^N \frac{1}
{\sigma_i^2}}
\sigma_{stat}^2 &= \frac{1}{\sum_{i=1}^N \frac{1}{\sigma_i^2}}
\sigma_{scatter}^2 &= \frac{\sum_{i=1}^N \left(\frac{x_i-\left\langle
x\right\rangle_{weighted}}
{\sigma_i}\right)^2}
{\left(N-1\right)\sum_{i=1}^N \frac{1}{\sigma_i^2}}"""
# x = np.ravel(x)
# sigma = np.ravel(sigma)
Xstat = (1 / sigma**2).sum(axis=axis)
Xm = (x / sigma**2).sum(axis=axis) / Xstat
# Xscatt = (((x - Xm) / sigma)**2).sum() / ((1 - 1.0 / len(x)) * Xstat)
Xscatt = (((x - Xm) / sigma)**2).sum(axis=axis) / ((len(x) - 1) * Xstat)
Xstat = 1 / Xstat
return Xm, np.maximum.reduce([Xstat, Xscatt], axis=axis) ** 0.5
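# Hedged usage sketch: three invented measurements of the same quantity with
# different uncertainties; the second return value is the larger of the
# statistical and scatter uncertainties, as described in the docstring.
def _example_weighted_average():
    x = np.array([10.1, 9.8, 10.3])
    sigma = np.array([0.2, 0.3, 0.25])
    return weighted_average(x, sigma)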
def bootstrap_ci(dataframe, kind='basic'):
"""Generate confidence intervals on the 1-sigma level for bootstrapped data
given in a DataFrame.
Parameters
----------
dataframe: DataFrame
DataFrame with the results of each bootstrap fit on a row. If the
t-method is to be used, a Panel is required, with the data in
the panel labeled 'data' and the uncertainties labeled 'stderr'
kind: str, optional
Selects which method to use: percentile, basic, or t-method (student).
Returns
-------
DataFrame
Dataframe containing the left and right limits for each column as rows.
"""
if isinstance(dataframe, pd.Panel):
data = dataframe['data']
stderrs = dataframe['stderr']
args = (data, stderrs)
else:
data = dataframe
        args = (data,)
def percentile(data, stderrs=None):
CI = pd.DataFrame(index=['left', 'right'], columns=data.columns)
left = data.apply(lambda col: np.percentile(col, 15.865), axis=0)
right = data.apply(lambda col: np.percentile(col, 84.135), axis=0)
CI.loc['left'] = left
CI.loc['right'] = right
return CI
def basic(data, stderrs=None):
CI = pd.DataFrame(index=['left', 'right'], columns=data.columns)
left = data.apply(lambda col: 2 * col[0] - np.percentile(col[1:],
84.135),
axis=0)
right = data.apply(lambda col: 2 * col[0] - np.percentile(col[1:],
15.865),
axis=0)
CI.loc['left'] = left
CI.loc['right'] = right
return CI
def student(data, stderrs=None):
CI = | pd.DataFrame(index=['left', 'right'], columns=data.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
""" Simple multi-area model for Nordic electricity market
Created on Wed Jan 16 11:31:07 2019
@author: elisn
Notes:
1 - For conversion between dates (YYYYMMDD:HH) and weeks (YYYY:WW) weeks are counted as starting during the first hour
in a year and lasting 7 days, except for the last week which covers the remaining hours in the year. Thus all years
are assumed to have 52 weeks. This definition is not according to ISO calendar standard but is legacy from the
first version of the model, probably changing it would not significantly change the results. Also note that the
MAF inflow data used also does not follow ISO calendar standard for weeks but counts weeks as starting with Sundays.
2 - It is not known if the ENTSO-E reservoir data corresponds to the reservoir level at the beginning/end of the week.
This can decrease the accuracy of the model for short time periods but does not affect much when simulating a whole year
A request has been made to find the answer from ENTSO-E
3 - For the exchange GB-NL, February 20-27 2016, the flows and scheduled exchanges are outside the implicitly
allocated day ahead capacity, it's not known why
"""
### EXTERNAL LIBRARIES ###
import time
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pathlib import Path
import datetime
import os
import pyomo.environ as pye
from contextlib import redirect_stdout
##############################################################
######### MODULES FROM POWER_DATABASES #######################
##############################################################
import maf_hydro_data
import maf_pecd_data
import entsoe_transparency_db as entsoe
from help_functions import compact_xaxis_ticks, \
week_to_range, str_to_date, intersection, duration_curve, interp_time, \
interpolate_weekly_values, time_to_bin, err_func, curtailment_statistics
### INTERNAL MODULES ###
from offer_curves import SupplyCurve
from model_definitions import MWtoGW, GWtoMW, cm_per_inch, std_fig_size, area_to_country, country_to_areas, entsoe_type_map, synchronous_areas, colors, \
nordpool_capacities, generators_def, solver_executables, solver_stats, bidz2maf_pecd, co2_price_ets, \
new_trans_cap, GWtoTW, TWtoGW, all_areas
from help_classes import EmptyObject, Error
from week_conversion import WeekDef
class Model:
""" Contains all data processing not related to specific solver api (gurobi/pyomo)
NAMING CONVENTIONS:
df_xxx - dataframe obtained from external database
TIME CONVENTIONS:
For average energy quantities, time stamp marks beginning of the (hourly) interval. This is consistent with
convention in databases, since the beginning of the hour has been used to time stamp hourly data.
starttime - beginning of first period in model
endtime - end of last period in model
timerange - all hours modelled (beginning of hour)
idx_time - index of timerange, used to create time set in optimization model
timerange_p1 - all hours including endtime hour
Note: the data used is retrieved for all hours in timerange plus one extra hour, to allow for interpolation of
the data to higher resolution
"""
def __init__(self,name='default',path='D:/NordicModel/Results',db_path='D:/Data',
data_path='D:/NordicModel/InputData'):
self.name = name
self.data_path = Path(data_path)
self.db_path = Path(db_path)
self.res_path = Path(path) / name
self.fig_path = self.res_path / 'Figures'
self.root_path = self.res_path # points to root directory of this model
self.res_path.mkdir(exist_ok=True,parents=True)
self.fig_path.mkdir(exist_ok=True,parents=True)
self.runs = [] # store results from multiple model runs
self.res_time = {} # store runtime info
def update_path(self,path='D:/NordicModel/Results/case'):
""" Update path where figures and results are stored, without changing root path """
self.res_path = Path(path)
self.fig_path = self.res_path / 'Figures'
self.res_path.mkdir(exist_ok=True)
self.fig_path.mkdir(exist_ok=True)
def default_options(self):
""" Set default options for model """
############# BASIC OPTIONS ##############
self.opt_solver = 'ipopt' # solver to use, must be installed
self.opt_api = 'pyomo' # pyomo/gurobi (gurobi api only works if solver is also gurobi)
self.opt_solver_opts = {} # options to pass to solver (with pyomo api)
self.opt_start = '20180101'
self.opt_end = '20180108'
self.opt_weather_year = 2016 # used to get maf data, inflow data, and solar merra data
self.opt_load_scale = 1 # scale load by this factor
self.opt_loss = 0 # Fraction of energy lost in transmission
self.opt_nonnegative_data = ['inflow']
self.opt_countries = ['SE','DK','NO','FI','EE','LT','LV','PL','DE','NL','GB'] # modelled countries
self.opt_use_maf_pecd = False # use solar and wind data from MAF2020
self.opt_impute_limit = 30 # maximum number of values to interpolate in data
self.opt_impute_constant = { # constants used to impute remaining missing values in input data
'exchange':0, # for external exchanges
'solar':0,
}
self.opt_run_initialization = False # run low resolution model to get values for initialization
self.opt_init_delta = 168
# Note: initialization is useful for some solvers (e.g. ipopt) but may not be for others (e.g. gurobi)
self.opt_db_files = {
'capacity':'capacity.db',
'prices':'prices.db',
'exchange':'exchange.db',
'gen':'gen.db',
'unit':'unit.db',
'load':'load.db',
'reservoir':'reservoir.db',
'inflow':'inflow.db',
'maf_hydro':'maf_hydro.db',
'maf_pecd':'maf_pecd.db',
}
self.opt_err_labl = 'MAE' # should be consistent with the error computed in err_func
########## COST OPTIONS ##########################
self.opt_costfit_tag = '2019' # use this costfit from the input parameters
self.opt_hydro_cost = False # include fitted hydro costs, not properly implemented
self.opt_default_thermal_cost = 40 # default value for thermal cost
self.opt_loadshed_cost = 3000 # cost for demand curtailment
self.opt_nuclear_cost = 7.35 # default value for nuclear cost
self.opt_wind_cost = 1 # low wind cost in EUR/MWh to favour wind curtailment over solar
self.opt_use_var_cost = True # use variable costs
# Source for variable cost data: data['costfit_shifted']['tag']
# replace extreme cost fits (e.g. decreasing mc or very sharply increasing mc with fuel-based constant MC)
self.opt_overwrite_bad_costfits = True
self.opt_c2_min = 1e-5
self.opt_c2_max = 0.5
# specify co2 price, this is added to the price coefficient MC(p)=k*p+m+(co2_price-co2_price(offset_year))
self.opt_co2_price = None
self.opt_co2_price_offset_year = 2016 # if set to year, this assumes m already contains the cost for that year
############ TECHNICAL LIMITS #########################
self.opt_capacity_year = 2019 # use generation capacity from entsoe for this year
self.opt_hvdc_max_ramp = 600 # 600 MW/hour
self.opt_pmax_type = 'capacity'
self.opt_pmax_type_hydro = 'stats'
# Options for pmax: 'stats' - from gen_stats.xlsx (production statistics)
# 'capacity' - from entsoe capacity per type database
# For hydro the source for the maximum capacity is chosen separately
self.opt_pmin_zero = False # put pmin = 0
######### NUCLEAR OPTIONS ################
self.opt_nucl_min_lvl = 0.65 # nuclear can ramp down to this level
self.opt_nucl_ramp = None # overwrite nuclear ramp rate (%/hour)
self.opt_nucl_add_cap = {
'SE3':0,
'FI':0,
'DE':0,
} # add this firm capacity to nuclear generation
# option to compute nuclear max levels from individual units for some areas, can be used to deactivate certain
# nuclear reactors in order to simulate scenarios, requires production data for individual units
self.opt_nucl_individual_units = []
# exclude these nuclear reactors when deciding maximum generation levels - only possible with opt_nucl_individual_units
self.opt_nucl_units_exclude = []
#self.opt_nucl_units_exclude = ['Ringhals block 1 G11','Ringhals block 1 G12','Ringhals block 2 G21','Ringhals block 2 G22']
######### HYDRO OPTIONS #################
self.opt_reservoir_offset = 168
self.opt_reservoir_data_normalized = True # use normalized reservoir data
self.opt_default_inflow = 100
self.opt_default_inflow_area = { # GWh/week, per area
'DE':346, # 180 TWh yearly production
'PL':45,
'GB':107,
}
self.opt_use_maf_inflow = False # use MAF inflow data or inflow calculated from ENTSO-E data
# inflow interpolation:
# constant (i.e. constant for one week)
# linear (linear ramp rate between weeks)
self.opt_inflow_interp = 'linear'
self.opt_hydro_daily = False # daily reservoir constraints (instead of hourly)
self.opt_reservoir_start_fill = 0.5 # if reservoir data does not exist, assume default filling value
self.opt_reservoir_end_fill = 0.5
# share of inflow which is run of river, if no data available
self.opt_ror_fraction = {
'SE1':0.13,
'SE2':0.21,
'SE3':0.27,
'SE4':0.3,
'NO1':0.25,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'FI':0.27,
'LV':0.4,
'LT':0.5,
'PL':0.8,
'DE':0.9,
'GB':0.4,
}
self.opt_reservoir_capacity = { # GWh
'NO1':6507,
'NO2':33388,
'NO3':8737,
'NO4':19321,
'NO5':16459,
'SE1':13688,
'SE2':15037,
'SE3':2517,
'SE4':216,
'FI':4512,
'LT':12.2,
'LV':11.2,
'PL':2.3,
'DE':1263,
'GB':26.4,
}
# pumping capacity
self.opt_pump_capacity = { # in MW, from MAF data
'PL':1660,
'DE':7960,
'GB':2680,
'NO1':130,
'NO2':430,
'NO3':70,
'NO5':470,
'LT':720,
}
self.opt_pump_reservoir = { # in GWh
'PL':6.3,
'DE':20,
}
# pumping efficiency
self.opt_pump_efficiency = 0.75
############# RESERVE OPTIONS ################
self.opt_use_reserves = False # include reserve requirements
self.opt_country_reserves = False # reserves by country instead of by area (more flexibility)
self.opt_reserves_fcrn = { # this is the allocation of 600 MW FCR-N
'SE':245,
'NO':215,
'DK':0,
'FI':140,
}
self.opt_reserves_fcrd = 1200 # FCR-D, allocated in same proportion as FCR-N
######## EXTERNAL AREAS OPTIONS #################
# the price will be set for these price areas, and the export/import will be variable instead of fixed
self.opt_set_external_price = ['DE','PL']
self.opt_default_prices = {
            'PL':40, # use this price for external connections if no other is available
'RU':40,
'DE':40,
'NL':40,
'GB':40,
}
self.opt_exchange_data_type = 'flow'
########### TRANSFER CAPACITY OPTIONS #####################
self.opt_use_var_exchange_cap = True
self.opt_nominal_capacity_connections = [('NL','GB'),]
        # these connections will always use nominal exchange capacity
self.opt_min_exchange_cap = 100 # minimum variable transfer capacity (MW)
# may be set to >= 2018 to include additional future transmission capacity,
# from new_trans_cap in model_definitions
self.opt_exchange_cap_year = None
########## WIND OPTIONS #############
self.opt_wind_scale_factor = {
'SE1':1,
'SE2':1,
'SE3':1,
'SE4':1,
}
self.opt_wind_capacity_onsh = {
'DK1':3725,
'DK2':756,
'EE':329,
'FI':2422,
'LT':540,
'LV':84,
'NO1':166,
'NO2':1145,
'NO3':1090,
'NO4':668,
'NO5':0,
'SE1':1838,
'SE2':3849,
'SE3':2780,
'SE4':1581,
'PL':5952,
'NL':3973,
'DE':53338,
'GB':14282,
}
self.opt_wind_capacity_offsh = {
'DK1':1277,
'DK2':423,
'EE':0,
'FI':0,
'LT':0,
'LV':0,
'NO1':0,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'SE1':0,
'SE2':0,
'SE3':0,
'SE4':0,
'PL':0,
'NL':1709,
'DE':7504,
'GB':10383,
}
########### SOLAR OPTIONS #############
# Note: the solar capacities only apply if opt_use_maf_pecd is True, otherwise ENTSO-E production data is used for solar
# manually specify solar capacity for areas:
self.opt_solar_cap_by_area = {
'DK1':878, # from ENTSO-E
'DK2':422,
'EE':164,
'FI':215,
'LT':169,
'LV':11,
'SE1':9, # from Energiåret 2020 (energiföretagen)
'SE2':67,
'SE3':774,
'SE4':240,
'PL':1310,
'NL':5710,
'DE':48376,
'GB':13563,
}
# if solar capacity for an area is not specified, the aggregated value
# for that country is used, weighted by the areas share of total load
self.opt_solar_cap_by_country = { # from IRENA Capacity Statistics 2020
'DK':1079,
'FI':215,
'NO':90,
'SE':644,
'LV':3,
'LT':103,
'EE':107,}
########## INERTIA OPTIONS ####################
self.opt_use_inertia_constr = False # inertia constraints
self.opt_min_kinetic_energy = 113 # GWs
# calculation of kinetic energy: Ek = H*P/(cf*pf)
# inertia constants from Persson (2017) Kinetic Energy Estimation in the Nordic System
self.opt_inertia_constants = {
'SE':{'Hydro':4.5,'Thermal':2.9,'Nuclear':6.2,},
'NO':{'Hydro':2.9,'Thermal':2.5,},
'FI':{'Hydro':2.8,'Thermal':4.4,'Nuclear':6.6,},
'DK':{'Thermal':4.5,},
}
# assumption about power factor pf
self.opt_inertia_pf = {
'SE':{'Hydro':0.9,'Thermal':0.9,'Nuclear':0.9,},
'NO':{'Hydro':0.9,'Thermal':0.9,},
'FI':{'Hydro':0.9,'Thermal':0.9,'Nuclear':0.9,},
'DK':{'Thermal':0.9,},
}
# assumption about capacity factor cf
self.opt_inertia_cf = {
'SE':{'Hydro':0.8,'Thermal':1,'Nuclear':1,},
'NO':{'Hydro':0.8,'Thermal':1,},
'FI':{'Hydro':0.8,'Thermal':1,'Nuclear':1,},
'DK':{'Thermal':1,},
}
####### ROUNDING VALUES ##############
self.opt_bound_cut = { # round values below this threshold to zero, to avoid small coefficients
'max_SOLAR':1e-4,
'max_WIND':1e-4,
'min_PG':1e-4,
}
######## FIGURE OPTIONS ##################
self.fopt_no_plots = False
self.fopt_plots = {
'gentype':True,
'gentot':True,
'gentot_bar':False,
'renewables':False,
'transfer_internal':True,
'transfer_external':True,
'reservoir':False,
'price':False,
'losses':False,
'load_curtailment':False,
'inertia':False,
'hydro_duration':False,
'wind_curtailment':False,
}
self.fopt_plot_weeks = []
self.fopt_use_titles = True
self.fopt_show_rmse = True # also show absolute RMSE on fopt_plots
self.fopt_eps = False
self.fopt_print_text = False # print model to text file
self.fopt_print_dual_text = False # print dual to text file
self.fopt_dpi_qual = 1000
# control inset in plot
self.fopt_inset_date = None
self.fopt_inset_days = 5
self.fopt_calc_rmse = { # some rmse calculations need additional data
'price':True,
'transfer':True
}
self.fopt_rmse_transfer_data_type = 'flow'
##### OPTIONS TO PRINT OUTPUT ######
self.opt_print = {
'init':True,
'solver':True,
'setup':True,
'postprocess':True,
'check':True,
}
self.default_pp_opt()
def default_pp_opt(self):
########## OPTIONS CONTROLLING POST PROCESSING ###############
self.pp_opt = EmptyObject()
self.pp_opt.get_vars = ['SPILLAGE','PG','RES','X1','X2','WIND','XEXT','LS','SOLAR','HROR','PUMP','REL','PRES']
self.pp_opt.inst_vars = ['RES','PRES']
self.pp_opt.daily_vars = ['RES','SPILLAGE'] # daily variables if opt_hydro_daily is True
# Note: duals only obtained only if the constraint exists (some constraints are optional)
self.pp_opt.get_duals = ['POWER_BALANCE','RESERVOIR_BALANCE','HVDC_RAMP','GEN_RAMP',
'RESERVES_UP','RESERVES_DW','FIX_RESERVOIR','INERTIA']
self.pp_opt.get_cur_vars = ['WIND','SOLAR','HROR']
def effective_reservoir_range(self):
# effective ranges, based on min and max reservoir values from entso-e data
self.opt_reservoir_capacity = { # GWh
'SE1':11326,
'SE2':13533,
'SE3':1790,
'SE4':180,
'FI':2952,
'NO1':6078,
'NO2':21671,
'NO3':7719,
'NO4':14676,
'NO5':14090,
'LT':11.8,
'LV':9.4,
'DE':2430,
'PL':2800,
'GB':4100,
}
def vre_cap_2016(self):
""" Set wind and solar capacities to values from 2016, for validation of model with MAF data for this year """
pass
# SOLAR CAPACITY
self.opt_solar_cap_by_area = {
'DK1':421, # from ENTSO-E
'DK2':180,
'PL':77,
'NL':1429,
'DE':40679,
'GB':11914,
}
# if solar capacity for an area is not specified, the aggregated value
# for that country is used, weighted by the areas share of total load
self.opt_solar_cap_by_country = { # from IRENA Capacity Statistics
'DK':851,
'FI':39,
'NO':27,
'SE':153,
'LV':1,
'LT':70,
'EE':10,
}
# MAF WIND CAPACITY
self.opt_wind_capacity_onsh = {
'DK1':2966,
'DK2':608,
'EE':375,
'FI':2422,
'LT':366,
'LV':55,
'NO1':0,
'NO2':261,
'NO3':361,
'NO4':251,
'NO5':0,
'SE1':524,
'SE2':2289,
'SE3':2098,
'SE4':1609,
'PL':5494,
'NL':3284,
'DE':45435,
'GB':10833,
}
self.opt_wind_capacity_offsh = {
'DK1':843,
'DK2':428,
'EE':0,
'FI':0,
'LT':0,
'LV':0,
'NO1':0,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'SE1':0,
'SE2':0,
'SE3':0,
'SE4':0,
'PL':0,
'NL':357,
'DE':4000,
'GB':5293,
}
def run(self,save_model=False):
""" Run single case of model, for current settings """
pass
self.res_time = {}
t_0 = time.time()
self.setup()
self.res_time['pre'] = time.time() - t_0
t__0 = time.time()
self.setup_child_model()
self.res_time['cm'] = time.time() - t__0
self.solve()
t__0 = time.time()
self.post_process()
self.res_time['post'] = time.time() - t__0
self.res_time['tot'] = time.time() - t_0
if save_model:
self.save_model()
def run_years(self,years=range(2015,2017),append=False,save_full_model=False):
""" Run model using weather data for multiple years between start and end
save_full_model: Save full model using save_model for start year in root path
"""
start = years[0]
self.opt_weather_year = start
self.update_path(self.root_path/f'{start}')
# run first instance of model
self.run()
self.save_model_run(append=append)
if save_full_model:
self.update_path(self.root_path)
self.save_model()
# update weather data and run remaining instances
for year in years[1:]:
self.update_path(self.root_path/f'{year}')
self.re_run_year(year=year)
self.save_model_run(append=append)
def re_run_year(self,year=2015):
""" Update the weather year and re-run model """
print(f'---- RE-RUN YEAR {year} -----')
self.res_time = {}
t_0 = time.time()
self.opt_weather_year = year
self.setup_weather_indices()
self.get_inflow_data()
self.setup_inflow()
self.setup_run_of_river()
self.setup_inflow_feasibility()
self.max_HROR = {
(a,t):self.ror_hourly.at[self.timerange[t],a]*MWtoGW for a in self.ror_areas for t in self.idx_time
}
self.setup_solar()
self.setup_wind()
self.max_SOLAR = {
(a,t):self.solar.at[self.timerange[t],a] for a in self.solar_areas for t in self.idx_time
}
self.max_WIND = {
(a,t):self.wind.at[self.timerange[t],a]*self.opt_wind_scale_factor[a]
for a in self.wind_areas for t in self.idx_time
}
for name in ['max_WIND','max_SOLAR']:
self.round_bound(name)
#%%
if self.opt_run_initialization:
self.run_init_model()
#%%
self.res_time['pre'] = time.time() - t_0
t_1 = time.time()
self.cm.update_inflow()
self.cm.update_ror(self.max_HROR)
self.cm.update_solar(self.max_SOLAR)
self.cm.update_wind(self.max_WIND)
self.res_time['cm'] = time.time() - t_1
#%% rerun model
self.solve()
t_1 = time.time()
self.post_process()
self.res_time['post'] = time.time() - t_1
self.res_time['tot'] = time.time() - t_0
print(f'------ FINISHED YEAR {year} --------')
def load_results_years(self,vars=['res_PG','res_LS'],years=None):
""" Get given results for all yearly runs"""
res = {
v:{} for v in vars
}
exist_years = []
for y in [y for y in os.listdir(self.root_path) if os.path.isdir(self.root_path / y)]:
try:
exist_years.append(int(y))
except Exception:
pass
if years is None:
years = exist_years
else:
years = [y for y in exist_years if y in years]
# get results from all runs
for y in years:
self.load_model_run(y)
for v in vars:
res[v][y] = self.__getattribute__(v)
return res
def round_bound(self,name):
prt = self.opt_print['setup']
if name in self.opt_bound_cut:
thrs = self.opt_bound_cut[name]
dic = self.__getattribute__(name)
count = 0
for i,val in dic.items():
if val > 0 and val < thrs:
dic[i] = 0
count += 1
if count and prt:
print(f'Rounded {count} values to zero in {name}')
def save_model(self):
"""
Dump all model results to pickle file. Also save options, gen data etc., as well as self.runs
Can produce very large file if several runs are stored in self.runs
        The values saved are sufficient to rerun all plot functions, after first calling setup_data
"""
d = {}
save_vars = ['runs','ror_areas','generators_def','hydrores','areas','wind_areas','solar_areas','pump_res_areas',
'pump_areas','ror_reserve_areas','nuclear_areas','resareas','syncareas','gen_in_area',
'xtrans_int','xtrans_ext','rescountries','reservoir_capacity','pump_reservoir','fixed_transfer_connections',
'fixed_price_connections','area_sep_str','solar_capacity',
]
vars = [v for v in dir(self) if v.split('_',1)[0] in ['res','gen','idx','opt','fopt','dual','max','min'] or v in save_vars]
for v in vars:
d[v] = self.__getattribute__(v)
with open(self.root_path/f'results.pkl','wb') as f:
pickle.dump(d,f)
def save_model_run(self,append=False):
"""
Dump results from current model run in results.pkl
If append=True, results are also appended to list in self.runs
Storing many runs in self.runs can consume lots of memory, so it may
be better just to save the pickle files and load them when needed
"""
# save_entities = ['inflow_hourly','weeks','inflow','inflow_hourly_tmp','ror_hourly']
save_entities = []
run = {
v:self.__getattribute__(v) for v in [ v for v in dir(self) if v.split('_',1)[0] == 'res' or v in save_entities]
}
run['opt_weather_year'] = self.opt_weather_year
if append:
self.runs.append(run)
with open(self.res_path/f'results.pkl','wb') as f:
pickle.dump(run,f)
def load_model(self):
with open(self.res_path/f'results.pkl','rb') as f:
d = pickle.load(f)
for v in d:
self.__setattr__(v,d[v])
def load_model_run(self,year=2015):
self.res_path = self.root_path / f'{year}'
self.load_model()
self.res_path = self.root_path
def redo_plots(self):
print('----- REDO PLOTS -----')
self.load_model()
self.setup_indices()
self.setup_weather_indices()
self.setup_data()
self.get_rmse_data()
self.plot_figures()
def setup_child_model(self):
""" Create the Pyomo/Gorubi model object """
api = self.opt_api
solver = self.opt_solver
# Choose child model "cm" class depending on api type
if api == 'gurobi' and solver == 'gurobi':
from gurobi_model import GurobiModel
self.cm = GurobiModel(name=self.name)
else:
if api == 'gurobi':
print(f'WARNING: Can only use gurobi api with gurobi, using pyomo api!')
from pyomo_model import PyomoModel
self.cm = PyomoModel()
self.cm.setup_opt_problem(self)
def setup(self):
pass
prt = self.opt_print['setup']
self.vars_df_up_bound = {
'WIND':['wind_areas','idx_time'],
'SOLAR':['solar_areas','idx_time'],
'LS':['areas','idx_time'],
'HROR':['ror_areas','idx_time'],
}
print('----- SETUP -------------')
self.setup_indices()
self.setup_weather_indices()
self.setup_transmission()
if prt:
print('----- SETUP DATA --------')
self.setup_data()
if prt:
print('----- SETUP GEN ---------')
self.setup_gen()
if prt:
print('----- SETUP RESERVES ----')
self.setup_reserves()
if prt:
print('----- SETUP HYDRO -------')
self.setup_hydro()
if prt:
print('----- SETUP WIND --------')
self.setup_wind()
if prt:
print('----- SETUP SOLAR -------')
self.setup_solar()
if prt:
print('----- SETUP RESERVOIR ---')
self.setup_reservoir_values()
if prt:
print('----- SETUP INFLOW ------')
self.setup_inflow()
if prt:
print('----- SETUP ROR --------')
self.setup_run_of_river()
self.setup_inflow_feasibility()
if prt:
print('----- SETUP BOUNDS -----')
self.setup_bounds()
if self.opt_run_initialization:
self.run_init_model()
print('----- SETUP COMPLETE ----')
self.print_hydro_table()
self.print_renewable_table()
def solve(self):
""" Solve model """
print(' ----- STARTING SOLVER -----')
prt = self.opt_print['solver']
solver = self.opt_solver
if not hasattr(self,'cm'):
print('Model does not have child model, run "setup_child_model"')
return None
elif self.cm.api == 'pyomo': # pyomo model
## DECLARE DUAL
if not hasattr(self.cm,'dual'):
self.cm.dual = pye.Suffix(direction=pye.Suffix.IMPORT)
## SOLVE MODEL
if solver in solver_executables: # give explicit solver path
opt = pye.SolverFactory(solver,executable=solver_executables[solver],options=self.opt_solver_opts)
else:
opt = pye.SolverFactory(solver,options=self.opt_solver_opts)
res = opt.solve(self.cm, tee=prt)
if 'Time' in res['solver'][0]:
self.res_time['solver'] = res['solver'][0]['Time']
else:
self.res_time['solver'] = np.nan
self.res_stats = {
name:res['problem'][0][solver_stats['pyomo'][name]] for name in solver_stats['pyomo']
}
else: # gurobi model
if not prt:
self.cm.gm.setParam('OutputFlag',0)
self.cm.gm.optimize()
self.res_time['solver'] = self.cm.gm.Runtime
self.res_stats = {
name:self.cm.gm.getAttr(solver_stats['gurobi'][name]) for name in solver_stats['gurobi']
}
print(' ----- FINISHED SOLVER -----')
def post_process(self):
""" Post-processing of optimization results and plotting of figures """
print('----- POST PROCESS ------')
prt = self.opt_print['postprocess']
############### RESULTS ##########################
self.res_residuals = {} # residuals to check supply == demand
self.res_rmse_area = pd.DataFrame(dtype=float,index=self.areas,columns=['Prod','Hydro','Thermal','Nuclear','Price'])
self.res_rmse_intcon = pd.DataFrame(index=self.xtrans_int.index,columns=['From','To','RMSE'])
# self.res_rmse_intcon.loc[:,['From','To']] = self.xtrans_int.loc[:,['from','to']]
self.res_rmse_intcon['From'] = self.xtrans_int['from']
self.res_rmse_intcon['To'] = self.xtrans_int['to']
self.res_rmse_extcon = pd.DataFrame(index=self.xtrans_ext.index,columns=['From','To','RMSE'])
# self.res_rmse_extcon.loc[:,['From','To']] = self.xtrans_ext.loc[:,['from','to']]
self.res_rmse_extcon['From'] = self.xtrans_ext['from']
self.res_rmse_extcon['To'] = self.xtrans_ext['to']
self.res_rmse_area_norm = self.res_rmse_area.copy()
self.res_rmse_intcon_norm = | pd.Series(index=self.xtrans_int.index) | pandas.Series |
import spacy
# from collections import defaultdict
nlp = spacy.load('en_core_web_lg')
import pandas as pd
import sys
# import random
import pickle
import numpy as np
import ast
import time
start_time = time.time()
all_sr = ['bpd', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
etype = "DL"
sample = "_sample"
sym_file = "data/entities/{}/{}_symptom_mappings.csv".format(etype, etype)
drug_file = "data/entities/{}/{}_drugs_mappings.csv".format(etype, etype)
features_file = "data/features/{}_embdedded_features{}.pckl".format(etype, sample)
sym = | pd.read_csv(sym_file) | pandas.read_csv |
"""A collection of shared utilities for all encoders, not intended for external use."""
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
__author__ = 'willmcginnis'
def get_obj_cols(df):
obj_cols = []
for idx, dt in enumerate(df.dtypes):
if dt == 'object':
obj_cols.append(df.columns.values[idx])
return obj_cols
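# Hedged usage sketch: only the string-valued column is reported as categorical.
def _example_get_obj_cols():
    df = pd.DataFrame({'color': ['red', 'blue'], 'size': [1, 2]})
    return get_obj_cols(df)  # -> ['color']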
def convert_input(X):
if not isinstance(X, pd.DataFrame):
if isinstance(X, list):
X = pd.DataFrame(np.array(X))
elif isinstance(X, (np.generic, np.ndarray)):
X = pd.DataFrame(X)
elif isinstance(X, csr_matrix):
X = pd.DataFrame(X.todense())
else:
raise ValueError('Unexpected input type: %s' % (str(type(X))))
X = X.apply(lambda x: | pd.to_numeric(x, errors='ignore') | pandas.to_numeric |
"""Functions used in the provincial and national-scale network failure analysis
"""
import ast
import copy
import csv
import itertools
import math
import operator
import os
import sys
import json
import igraph as ig
import networkx as nx
from collections import defaultdict
from itertools import chain
import numpy as np
import pandas as pd
import geopandas as gpd
from atra.utils import *
from tqdm import tqdm
def spatial_scenario_selection(network_shapefile,
polygon_dataframe, hazard_dictionary,
data_dictionary,network_id_column,network_type ='nodes'):
"""Intersect network edges/nodes and boundary Polygons to collect boundary and hazard attributes
Parameters
- network_shapefile - Shapefile of edge LineStrings or node Points
- polygon_shapefile - Shapefile of boundary Polygons
- hazard_dictionary - Dictionary of hazard attributes
- data_dictionary - Dictionary of network-hazard-boundary intersection attributes
- network_type - String value -'edges' or 'nodes' - Default = 'nodes'
- name_province - String name of province if needed - Default = ''
Outputs
data_dictionary - Dictionary of network-hazard-boundary intersection attributes:
- edge_id/node_id - String name of intersecting edge ID or node ID
- length - Float length of intersection of edge LineString and hazard Polygon: Only for edges
- province_id - String/Integer ID of Province
- province_name - String name of Province in English
- district_id - String/Integer ID of District
- district_name - String name of District in English
- commune_id - String/Integer ID of Commune
- commune_name - String name of Commune in English
- hazard_attributes - Dictionary of all attributes from hazard dictionary
"""
line_gpd = gpd.read_file(network_shapefile)
poly_gpd = polygon_dataframe
if len(line_gpd.index) > 0 and len(poly_gpd.index) > 0:
print (network_shapefile,len(line_gpd.index),len(poly_gpd.index))
line_gpd.columns = map(str.lower, line_gpd.columns)
poly_gpd.columns = map(str.lower, poly_gpd.columns)
# create spatial index
        poly_sindex = poly_gpd.sindex
for l_index, lines in line_gpd.iterrows():
intersected_polys = poly_gpd.iloc[list(
poly_sindex.intersection(lines.geometry.bounds))]
for p_index, poly in intersected_polys.iterrows():
if (lines['geometry'].intersects(poly['geometry']) is True) and (poly.geometry.is_valid is True) and (lines.geometry.is_valid is True):
if network_type == 'edges':
value_dictionary = {network_id_column: lines[network_id_column],
'length': 1000.0*line_length(lines['geometry'].intersection(poly['geometry'])),
'province_id': poly['province_id'], 'province_name': poly['province_name'],
'department_id': poly['department_id'], 'department_name': poly['department_name']}
elif network_type == 'nodes':
value_dictionary = {network_id_column: lines[network_id_column],
'province_id': poly['province_id'], 'province_name': poly['province_name'],
'department_id': poly['department_id'], 'department_name': poly['department_name']}
data_dictionary.append({**value_dictionary, **hazard_dictionary})
del line_gpd, poly_gpd
return data_dictionary
def combine_hazards_and_network_attributes_and_impacts(hazard_dataframe, network_dataframe,network_id_column):
hazard_dataframe.rename(columns={
'length': 'exposure_length',
'min_depth': 'min_flood_depth',
'max_depth': 'max_flood_depth'
}, inplace=True)
network_dataframe.rename(columns={'length': 'edge_length'}, inplace=True)
network_dataframe['edge_length'] = 1000.0*network_dataframe['edge_length']
all_edge_fail_scenarios = pd.merge(hazard_dataframe, network_dataframe, on=[
network_id_column], how='left').fillna(0)
all_edge_fail_scenarios['percent_exposure'] = 100.0 * \
all_edge_fail_scenarios['exposure_length']/all_edge_fail_scenarios['edge_length']
del hazard_dataframe, network_dataframe
return all_edge_fail_scenarios
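# Worked example (assumed numbers) of the percent_exposure computed above: an edge with
# edge_length 2.0 km (stored as 2000 m after the *1000 conversion) and an intersected
# exposure_length of 500 m gives percent_exposure = 100.0 * 500 / 2000 = 25.0 (%).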
def correct_exposures(x,length_thr):
el = float(x.exposure_length)
ep = float(x.percent_exposure)
if ep > 100:
el = 100.0*el/ep
ep = 100.0
if el < length_thr:
return el,ep, 1.0*el/length_thr
else:
return el,ep, 1.0
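# Hedged example (hypothetical record) of correct_exposures(): a record whose
# percent_exposure exceeds 100% is rescaled, and exposures shorter than length_thr are
# down-weighted by exposure_length/length_thr.
# >>> rec = pd.Series({'exposure_length': 500.0, 'percent_exposure': 125.0})
# >>> correct_exposures(rec, length_thr=1000.0)
# (400.0, 100.0, 0.4)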
def change_depth_string_to_number(x):
if 'cm' in x:
return 0.01*float(x.split('cm')[0])
elif 'm' in x:
return 1.0*float(x.split('m')[0])
else:
return x
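# Hedged examples (hypothetical depth strings) of change_depth_string_to_number():
# >>> change_depth_string_to_number('50cm')
# 0.5
# >>> change_depth_string_to_number('2m')
# 2.0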
def create_hazard_scenarios_for_adaptation(all_edge_fail_scenarios, index_cols, length_thr):
tqdm.pandas()
all_edge_fail_scenarios['min_flood_depth'] = all_edge_fail_scenarios.min_flood_depth.progress_apply(
lambda x:change_depth_string_to_number(x))
all_edge_fail_scenarios['max_flood_depth'] = all_edge_fail_scenarios.max_flood_depth.progress_apply(
lambda x:change_depth_string_to_number(x))
    min_height_prob = all_edge_fail_scenarios.groupby(index_cols)[['min_flood_depth', 'probability']].min().reset_index()
min_height_prob.rename(columns={'probability': 'min_probability'},inplace=True)
    max_height_prob = all_edge_fail_scenarios.groupby(index_cols)[['max_flood_depth', 'probability']].max().reset_index()
max_height_prob.rename(columns={'probability': 'max_probability'},inplace=True)
min_max_height_prob = pd.merge(min_height_prob,max_height_prob,how='left',on=index_cols)
del min_height_prob,max_height_prob
    prob_exposures = all_edge_fail_scenarios.groupby(index_cols + ['probability'])[['percent_exposure', 'exposure_length']].sum().reset_index()
del all_edge_fail_scenarios
prob_exposures['exposures_risk'] = prob_exposures.progress_apply(lambda x: correct_exposures(x,length_thr),axis=1)
prob_exposures[['exposure_length','percent_exposure','risk_wt']] = prob_exposures['exposures_risk'].apply(pd.Series)
    min_exposures = prob_exposures[index_cols + ['exposure_length', 'percent_exposure']].groupby(index_cols)[['exposure_length', 'percent_exposure']].min().reset_index()
min_exposures.rename(columns={'exposure_length':'min_exposure_length','percent_exposure':'min_exposure_percent'},inplace=True)
    max_exposures = prob_exposures[index_cols + ['exposure_length', 'percent_exposure']].groupby(index_cols)[['exposure_length', 'percent_exposure']].max().reset_index()
max_exposures.rename(columns={'exposure_length':'max_exposure_length','percent_exposure':'max_exposure_percent'},inplace=True)
exposures = pd.merge(min_exposures,max_exposures,how='left',on=index_cols).fillna(0)
del min_exposures,max_exposures
height_prob_exposures = pd.merge(min_max_height_prob,exposures,how='left',on=index_cols).fillna(0)
del min_max_height_prob, exposures
prob_exposures = prob_exposures.set_index(index_cols)
scenarios = list(set(prob_exposures.index.values.tolist()))
t = len(scenarios)
print('Number of failure scenarios',t)
scenarios_list = []
l = 0
for sc in scenarios:
l+= 1
prob_tup = [tuple(x) for x in prob_exposures.loc[[sc], ['probability','exposure_length','risk_wt']].values.tolist()]
if len(prob_tup) > 1:
prob_tup = [(w,x,y) for (w,x,y) in sorted(prob_tup, key=lambda pair: pair[0])]
risk_wt = 0
dam_wt = 0
for p in range(len(prob_tup)-1):
risk_wt += 0.5*(prob_tup[p+1][0]-prob_tup[p][0])*(prob_tup[p+1][-1]+prob_tup[p][-1])
dam_wt += 0.5*(prob_tup[p+1][0]-prob_tup[p][0])*(prob_tup[p+1][1]+prob_tup[p][1])
else:
risk_wt = prob_tup[0][0]*prob_tup[0][-1]
dam_wt = prob_tup[0][0]*prob_tup[0][1]
scenarios_list.append(list(sc) + [risk_wt, dam_wt])
print ('Done with scenario {} out of {}'.format(l,t))
new_cols = ['risk_wt', 'dam_wt']
scenarios_df = pd.DataFrame(scenarios_list, columns=index_cols + new_cols)
scenarios_df = pd.merge(scenarios_df,height_prob_exposures,how='left',on=index_cols).fillna(0)
del scenarios_list,height_prob_exposures
return scenarios_df
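# Worked example (assumed numbers) of the trapezoidal integration used above for risk_wt
# and dam_wt: with (probability, weight) pairs sorted by probability,
#   risk_wt = sum_p 0.5 * (P[p+1] - P[p]) * (w[p+1] + w[p]).
# For pairs (0.01, 0.2), (0.04, 0.6), (0.10, 1.0):
#   0.5*(0.04-0.01)*(0.6+0.2) + 0.5*(0.10-0.04)*(1.0+0.6) = 0.012 + 0.048 = 0.060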
def swap_min_max(x, min_col, max_col):
"""Swap columns if necessary
"""
if x[min_col] < 0 and x[max_col] < 0:
if abs(x[min_col]) > abs(x[max_col]):
return x[max_col], x[min_col]
else:
return x[min_col], x[max_col]
else:
if x[min_col] > x[max_col]:
return x[max_col], x[min_col]
else:
return x[min_col], x[max_col]
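# Hedged examples (hypothetical rows) of swap_min_max(), usually applied with axis=1:
# >>> row = pd.Series({'min_cost': 12.0, 'max_cost': 5.0})
# >>> swap_min_max(row, 'min_cost', 'max_cost')
# (5.0, 12.0)
# For pairs that are both negative, the value with the smaller magnitude is returned first:
# >>> swap_min_max(pd.Series({'min_cost': -2.0, 'max_cost': -8.0}), 'min_cost', 'max_cost')
# (-2.0, -8.0)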
def add_igraph_generalised_costs(G, vehicle_numbers, tonnage):
# G.es['max_cost'] = list(cost_param*(np.array(G.es['length'])/np.array(G.es['max_speed'])))
# G.es['min_cost'] = list(cost_param*(np.array(G.es['length'])/np.array(G.es['min_speed'])))
# print (G.es['max_time'])
G.es['max_gcost'] = list(
vehicle_numbers * np.array(G.es['max_time_cost'])
+ tonnage * np.array(G.es['max_tariff_cost'])
)
G.es['min_gcost'] = list(
vehicle_numbers * np.array(G.es['min_time_cost'])
+ tonnage * np.array(G.es['min_tariff_cost'])
)
return G
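# Minimal sketch (assumed attribute values) of how add_igraph_generalised_costs() is
# expected to be used: the igraph edges must already carry 'max_time_cost'/'min_time_cost'
# and 'max_tariff_cost'/'min_tariff_cost' attributes. The numbers below are illustrative only.
def _example_generalised_costs():
    G = ig.Graph(edges=[(0, 1), (1, 2)], directed=True)
    G.es['max_time_cost'] = [1.5, 2.0]
    G.es['min_time_cost'] = [1.0, 1.5]
    G.es['max_tariff_cost'] = [0.8, 1.2]
    G.es['min_tariff_cost'] = [0.5, 0.9]
    # e.g. 10 vehicles and 100 tonnes: max_gcost = 10*max_time_cost + 100*max_tariff_cost
    G = add_igraph_generalised_costs(G, 10, 100.0)
    return G.es['max_gcost'], G.es['min_gcost']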
def add_dataframe_generalised_costs(G, vehicle_numbers, tonnage):
# G.es['max_cost'] = list(cost_param*(np.array(G.es['length'])/np.array(G.es['max_speed'])))
# G.es['min_cost'] = list(cost_param*(np.array(G.es['length'])/np.array(G.es['min_speed'])))
# print (G.es['max_time'])
G['max_gcost'] = list(
vehicle_numbers * np.array(G['max_time_cost'])
+ tonnage * np.array(G['max_tariff_cost'])
)
G['min_gcost'] = list(
vehicle_numbers * np.array(G['min_time_cost'])
+ tonnage * np.array(G['min_tariff_cost'])
)
return G
def network_od_path_estimations(graph,
source, target, cost_criteria, time_criteria):
"""Estimate the paths, distances, times, and costs for given OD pair
Parameters
---------
graph
igraph network structure
source
String/Float/Integer name of Origin node ID
source
String/Float/Integer name of Destination node ID
tonnage : float
value of tonnage
vehicle_weight : float
unit weight of vehicle
cost_criteria : str
name of generalised cost criteria to be used: min_gcost or max_gcost
time_criteria : str
name of time criteria to be used: min_time or max_time
fixed_cost : bool
Returns
-------
edge_path_list : list[list]
nested lists of Strings/Floats/Integers of edge ID's in routes
path_dist_list : list[float]
estimated distances of routes
path_time_list : list[float]
estimated times of routes
path_gcost_list : list[float]
estimated generalised costs of routes
"""
paths = graph.get_shortest_paths(source, target, weights=cost_criteria, output="epath")
edge_path_list = []
path_dist_list = []
path_time_list = []
path_gcost_list = []
for path in paths:
edge_path = []
path_dist = 0
path_time = 0
path_gcost = 0
if path:
for n in path:
edge_path.append(graph.es[n]['edge_id'])
path_dist += graph.es[n]['length']
path_time += graph.es[n][time_criteria]
path_gcost += graph.es[n][cost_criteria]
edge_path_list.append(edge_path)
path_dist_list.append(path_dist)
path_time_list.append(path_time)
path_gcost_list.append(path_gcost)
return edge_path_list, path_dist_list, path_time_list, path_gcost_list
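# Minimal sketch (assumed attributes) of calling network_od_path_estimations(): the igraph
# edges must carry 'edge_id', 'length', the chosen time attribute and the chosen
# generalised-cost attribute. Attribute values below are illustrative only.
def _example_od_paths():
    G = ig.Graph(edges=[(0, 1), (1, 2)], directed=True)
    G.es['edge_id'] = ['e1', 'e2']
    G.es['length'] = [3.0, 4.0]
    G.es['min_time'] = [0.1, 0.2]
    G.es['min_gcost'] = [5.0, 6.0]
    # expected for OD pair (0 -> 2): edge paths [['e1', 'e2']], distances [7.0],
    # times [~0.3], generalised costs [11.0]
    return network_od_path_estimations(G, 0, [2], 'min_gcost', 'min_time')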
def write_flow_paths_to_network_files(save_paths_df,
min_industry_columns,
max_industry_columns,
gdf_edges, save_csv=True, save_shapes=True, shape_output_path='',csv_output_path=''):
"""Write results to Shapefiles
Outputs ``gdf_edges`` - a shapefile with minimum and maximum tonnage flows of all
commodities/industries for each edge of network.
Parameters
---------
save_paths_df
Pandas DataFrame of OD flow paths and their tonnages
industry_columns
List of string names of all OD commodities/industries indentified
min_max_exist
List of string names of commodity/industry columns for which min-max tonnage column names already exist
gdf_edges
GeoDataFrame of network edge set
save_csv
Boolean condition to tell code to save created edge csv file
save_shapes
Boolean condition to tell code to save created edge shapefile
shape_output_path
Path where the output shapefile will be stored
csv_output_path
Path where the output csv file will be stored
"""
edge_min_path_index = defaultdict(list)
edge_max_path_index = defaultdict(list)
for row in save_paths_df.itertuples():
for item in row.min_edge_path:
edge_min_path_index[item].append(row.Index)
for item in row.max_edge_path:
edge_max_path_index[item].append(row.Index)
edge_flows_min = []
edge_flows_max = []
for vals in edge_min_path_index.keys():
edge_flows = pd.DataFrame(list(zip([vals]*len(edge_min_path_index[vals]),edge_min_path_index[vals])),columns=['edge_id','path_index']).set_index('path_index')
edge_flows = edge_flows.join(save_paths_df, how='left').fillna(0)
edge_flows_min.append(edge_flows[['edge_id'] + min_industry_columns].groupby('edge_id')[min_industry_columns].sum().reset_index())
print ('Done with edge {} for min'.format(vals))
for vals in edge_max_path_index.keys():
edge_flows = pd.DataFrame(list(zip([vals]*len(edge_max_path_index[vals]),edge_max_path_index[vals])),columns=['edge_id','path_index']).set_index('path_index')
edge_flows = edge_flows.join(save_paths_df, how='left').fillna(0)
edge_flows_max.append(edge_flows[['edge_id'] + max_industry_columns].groupby('edge_id')[max_industry_columns].sum().reset_index())
print ('Done with edge {} for max'.format(vals))
if len(edge_flows_min) == 1:
edge_flows_min = edge_flows_min[0]
elif len(edge_flows_min) > 1:
        edge_flows_min = pd.concat(edge_flows_min, axis=0, sort=False, ignore_index=True).groupby('edge_id')[min_industry_columns].sum().reset_index()
# print (edge_flows_min)
if len(edge_flows_max) == 1:
edge_flows_max = edge_flows_max[0]
elif len(edge_flows_max) > 1:
        edge_flows_max = pd.concat(edge_flows_max, axis=0, sort=False, ignore_index=True).groupby('edge_id')[max_industry_columns].sum().reset_index()
# print (edge_flows_max)
if min_industry_columns == max_industry_columns:
for ind in min_industry_columns:
edge_flows_min.rename(columns={ind:'min_'+ind},inplace=True)
edge_flows_max.rename(columns={ind:'max_'+ind},inplace=True)
edge_flows = | pd.merge(edge_flows_min,edge_flows_max,how='left',on=['edge_id']) | pandas.merge |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
    'fontWeight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
'their dataset and if can compromise an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
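# Minimal sketch (synthetic data) of the two PCA variants used throughout the callbacks
# below: a "correlation matrix" PCA standardises the features first, while a "covariance
# matrix" PCA runs on the raw values. All names and numbers here are illustrative only.
def _example_pca_variants():
    rng = np.random.RandomState(0)
    demo = pd.DataFrame(rng.normal(size=(50, 3)), columns=['f1', 'f2', 'f3'])
    # correlation-matrix PCA: scale each feature to zero mean and unit variance first
    scaled = StandardScaler().fit_transform(demo.values)
    pca_corr = PCA(n_components=3).fit(scaled)
    # covariance-matrix PCA: fit on the raw (unscaled) values
    pca_cov = PCA(n_components=3).fit(demo.values)
    # loadings: correlations/covariances between the original features and the components
    loadings = pca_corr.components_.T * np.sqrt(pca_corr.explained_variance_)
    return pca_corr.explained_variance_ratio_, pca_cov.explained_variance_ratio_, loadings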
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
    df = parse_contents(contents, filename)
    if not isinstance(df, pd.DataFrame):
        # parse_contents returned an error message Div; leave the store unchanged
        return dash.no_update
    df = df.fillna(0)
    return df.to_json(date_format='iso', orient='split')
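# Sketch (hypothetical frame) of the dcc.Store round trip used above: the uploaded
# DataFrame is stored as a JSON string and re-hydrated in every callback with
# pd.read_json(..., orient='split'), which preserves column names, index and values.
# >>> demo = pd.DataFrame({'sample': ['a', 'b'], 'x': [1.0, 2.0]})
# >>> blob = demo.to_json(date_format='iso', orient='split')
# >>> pd.read_json(blob, orient='split')
#   sample    x
# 0      a  1.0
# 1      b  2.0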
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
            'layout': go.Layout(title='<b>Scree Plot: Cumulative Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
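# e.g. round_up(0.1234, 2) -> 0.13 and round_down(0.1287, 2) -> 0.12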
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
# explained variance of the principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
# loading of each feature in the principal components
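# loadings = eigenvectors scaled by sqrt(eigenvalues); with standardized inputs each loading
# corresponds to the Pearson correlation between the original feature and the principal component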
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
# combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=list(data.columns), y=list(data.index),
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# heatmap cells: loading of each feature on each principal component (feature-PC correlation for standardized data)
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_heatmap(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
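# element-wise squaring of the Pearson correlation matrix gives the R-squared matrix shown in the heatmap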
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# heatmap cells: coefficient of determination (R-squared) between each pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
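# each loading vector is drawn as a line from the origin to its (PC1, PC2) coordinates:
# the loading point and a zero point share the same 'line_group', so they are connected per feature further below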
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
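# four pre-computed variants: correlation or covariance matrix, each with and without outlier removal;
# the outlier and matrix-type selections below pick which variant is plotted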
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# drop the user-selected target feature(s) from the feature matrix
dff_input = dff.drop(columns=input)
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
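# sizeref rescales marker areas relative to the largest value of the selected size variable (plotly 'area' size mode)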
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = | pd.read_json(data, orient='split') | pandas.read_json |
import math
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib2tikz import save as tikz_save
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
from parameter import Parameter
from stationMapUtrBijlAsdZShl import StationMapUtrBijlAsdZShl
class PostProcessing(object):
def __init__(self, simulationResultSet, param):
#contains a set of simulationOutput objects
self.simResults = simulationResultSet
self.param = param
self.numSimOutput = Parameter.numSimRuns*Parameter.finalInstances
self.outputPath = param.outputFolder + '/' + param.postProcessFolder
if Parameter.savePlots:
#generate time ticks for time plots
self.generateTimeTicks()
#preprocessing
self.computeAverageAreaDensities()
self.loadAreaAttributes()
#generate plots
self.plotStreamFlows()
self.plotAreaDensities()
self.plotStationPlatformOccupancy()
self.plotUtilityDistribution()
self.plotRidershipDistribution()
self.plotFacilityLOSDistribution()
self.plotStationLOSEvolution()
#validation plots
self.compareGateStreamFlows()
self.compareEquippedLinkFlows()
self.aggregatePlatformFlows()
self.comparePlatformDensities()
#heatmaps
self.plotHeatmap()
def generateTimeTicks(self):
self.timeTickPos = [self.param.analysisStart + x*Parameter.analysisPlotInterval \
for x in range( int( (self.param.analysisEnd-self.param.analysisStart)/Parameter.analysisPlotInterval ) + 1) ]
self.timeTickLabel = list()
for totalSec in self.timeTickPos:
m, s = divmod(totalSec, 60)
h, m = divmod(m, 60)
assert(s==0), "xTickLabels should be chosen such that they do not contain seconds. (%s corresponds to %d:%02d:%02d)" % \
(totalSec, h, m, s)
self.timeTickLabel.append("%d:%02d" % (h, m) )
def configureTimeAxis(self, ax):
plt.xlim(self.param.analysisStart, self.param.analysisEnd)
ax.set_xticks(self.timeTickPos)
ax.set_xticklabels(self.timeTickLabel)
def generateFigure(self, filename, fig, tikz=None):
if tikz is None:
drawTikz = Parameter.exportTIKZ
else:
assert(tikz == True or tikz == False)
drawTikz = tikz
if Parameter.savePlots: plt.savefig(filename + '.pdf', bbox_inches='tight')
if drawTikz: tikz_save(filename + '.tex', figure=fig, show_info=False)
def plotStationLOSEvolution(self):
#container of the form usersPerServiceLevel[stationName][serviceLevel][timeStep]
usersPerServiceLevel = dict()
#initialize
for stationName in Parameter.stationNameDict.values():
usersPerServiceLevel[stationName] = {serviceLevel:[0]*Parameter.numTimePointsAnalysis \
for serviceLevel in Parameter.losLevels}
for areaName in Parameter.plotStationLOSEvolutionAreas:
stationName = self.areaStationNameDict[areaName]
areaType = self.areaTypeDict[areaName]
areaSize = self.areaSizeDict[areaName]
for timeStep in range(0,Parameter.numTimePointsAnalysis):
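# occupancy [pedestrians] = density [P/m^2] * area size [m^2], accumulated per LOS class and time step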
curDensity = self.avgAreaDensityDict[areaName][timeStep]
curOccupation = curDensity*areaSize
curServiceLevel = self.getLOS(areaType, curDensity)
usersPerServiceLevel[stationName][curServiceLevel][timeStep] += curOccupation
for stationName in usersPerServiceLevel.keys():
occupList = list()
colorList = list()
for serviceLevel in Parameter.losLevels:
occupList.append( usersPerServiceLevel[stationName][serviceLevel] )
colorList.append( Parameter.losLevelColors[serviceLevel] )
figLOSDist = plt.figure()
ax = figLOSDist.add_subplot(1, 1, 1)
plt.stackplot(self.param.timePointsAnalysis, occupList, labels=Parameter.losLevels, colors=colorList)
self.configureTimeAxis(ax)
plt.ylabel('number of pedestrians')
plt.title(stationName)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], title='LOS')
filenameLOSDist = "%s/serviceDistr_%s" % (self.outputPath, stationName)
self.generateFigure(filenameLOSDist, figLOSDist, tikz=False)
#generate text output since figure cannot be exported as tikz
filename = self.outputPath + '/serviceDistr_' + stationName
#generate header of text output file
servDistStr = "timePoints"
for label in Parameter.losLevels:
servDistStr += " %s" % label
servDistStr += "\n"
for timeIndex in range(0,len(self.param.timePointsAnalysis)):
servDistStr += "%s " % self.param.timePointsAnalysis[timeIndex]
for servLevelIndex in range(0,len(Parameter.losLevels)):
servDistStr += "%s " % occupList[servLevelIndex][timeIndex]
servDistStr += "\n"
with open(filename + ".txt", "w") as text_file:
print(servDistStr, file=text_file)
#close figure to reduce memory usage
plt.close("all")
def plotFacilityLOSDistribution(self):
assert( isinstance(Parameter.plotAreasLOSDistribution,list) ), "Type of Parameter.plotAreasLOSDistribution is %s, but should be 'list.'" % \
type(Parameter.plotAreasLOSDistribution)
losObsDict = dict() #histogram of observed LOS levels (infrastructure-oriented)
losObsExpDict = dict() #histogram of experienced LOS levels (passenger-oriented)
for areaName in Parameter.plotAreasLOSDistribution:
areaType = self.areaTypeDict[areaName]
densityList = self.avgAreaDensityDict[areaName]
losObsDict[areaName] = {losLevel:0 for losLevel in Parameter.losLevels}
losObsExpDict[areaName] = {losLevel:0 for losLevel in Parameter.losLevels}
cumDensArea = sum(densityList)
if cumDensArea == 0:
print("Warning: Area %s has zero density during entire analysis period" % areaName)
for curDens in densityList:
curLOS = self.getLOS(areaType, curDens)
losObsDict[areaName][curLOS] += 1/Parameter.numTimePointsAnalysis
if cumDensArea > 0:
losObsExpDict[areaName][curLOS] += curDens/cumDensArea
else:
assert(curDens == 0)
losObsExpDict[areaName][curLOS] += 0
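# infrastructure-oriented LOS weights every time step equally; experienced (passenger-oriented) LOS
# weights each time step by its density, i.e. by how many pedestrians were exposed to that condition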
### plot
ind = range(0,len( Parameter.plotAreasLOSDistribution )) # the x locations for areas of interest
width = 0.35 # the width of the bars
#initialize figure infrastructure-based LOS distribution
figLOS, axLOS = plt.subplots()
numObsLOS = [0]*len(Parameter.plotAreasLOSDistribution)
numObsCumLOS = [0]*len(Parameter.plotAreasLOSDistribution)
previousObsLOS = [0]*len(Parameter.plotAreasLOSDistribution)
for serviceLevel in reversed(Parameter.losLevels):
for areaIndex in range(0,len(Parameter.plotAreasLOSDistribution)):
areaName = Parameter.plotAreasLOSDistribution[areaIndex]
numObsLOS[areaIndex] = losObsDict[areaName][serviceLevel]
numObsCumLOS[areaIndex] += previousObsLOS[areaIndex]
previousObsLOS[areaIndex] = losObsDict[areaName][serviceLevel]
axLOS.bar(ind, numObsLOS, width, label=serviceLevel, bottom=numObsCumLOS, color=Parameter.losLevelColors[serviceLevel])
#set title and xticks
axLOS.set_title('Infrastructure-oriented level-of-service')
axLOS.set_xticks(ind)
axLOS.set_xticklabels(Parameter.plotAreasLOSDistribution)
#show legend entries in correct order
handles, labels = axLOS.get_legend_handles_labels()
axLOS.legend(handles[::-1], labels[::-1])
#show y-axis in percent
vals = axLOS.get_yticks()
axLOS.set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#save figure
filenameLOS = self.outputPath + "/losInfrastructure"
self.generateFigure(filenameLOS, figLOS)
#initialize figure experienced LOS
figExpLOS, axExpLOS = plt.subplots()
numObsExpLOS = [0]*len(Parameter.plotAreasLOSDistribution)
numObsCumExpLOS = [0]*len(Parameter.plotAreasLOSDistribution)
previousObsExpLOS = [0]*len(Parameter.plotAreasLOSDistribution)
for serviceLevel in reversed(Parameter.losLevels):
for areaIndex in range(0,len(Parameter.plotAreasLOSDistribution)):
areaName = Parameter.plotAreasLOSDistribution[areaIndex]
numObsExpLOS[areaIndex] = losObsExpDict[areaName][serviceLevel]
numObsCumExpLOS[areaIndex] += previousObsExpLOS[areaIndex]
previousObsExpLOS[areaIndex] = losObsExpDict[areaName][serviceLevel]
axExpLOS.bar(ind, numObsExpLOS, width, label=serviceLevel, bottom=numObsCumExpLOS, color=Parameter.losLevelColors[serviceLevel])
#set title and xticks
axExpLOS.set_title('Experienced level-of-service')
axExpLOS.set_xticks(ind)
axExpLOS.set_xticklabels(Parameter.plotAreasLOSDistribution)
#show legend entries in correct order
handles, labels = axExpLOS.get_legend_handles_labels()
axExpLOS.legend(handles[::-1], labels[::-1])
#show y-axis in percent
vals = axExpLOS.get_yticks()
axExpLOS.set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#save figure
filenameExpLOS = self.outputPath + '/losExperienced'
self.generateFigure(filenameExpLOS, figExpLOS)
#close figure to reduce memory usage
plt.close("all")
def loadAreaAttributes(self):
stationMap = StationMapUtrBijlAsdZShl()
areaSet = stationMap.areas
self.areaTypeDict = dict()
self.areaSizeDict = dict()
self.areaStationNameDict = dict()
for (areaName, areaSize, areaType, stationName) in areaSet:
self.areaTypeDict[areaName] = areaType
self.areaSizeDict[areaName] = areaSize
self.areaStationNameDict[areaName] = stationName
def computeAverageAreaDensities(self):
#dict containing for each areaName the vector of average densities (corresponding to Parameter.numTiePointsAnalysis)
self.avgAreaDensityDict = dict()
#compute average densities of all areas
for simOutput in self.simResults:
for areaName, densityList in simOutput.areaDensityDict.items():
#initialize average density list for each area
if areaName not in self.avgAreaDensityDict.keys():
self.avgAreaDensityDict[areaName] = [0]*Parameter.numTimePointsAnalysis
#add weighted densities
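# running mean over simulation runs: each run contributes its density list weighted by 1/numSimOutput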
self.avgAreaDensityDict[areaName] = [x[0]+x[1]/self.numSimOutput for x in zip(self.avgAreaDensityDict[areaName],densityList)]
def plotHeatmap(self):
#import area coordinates and attributes
areaCoords = pd.read_csv(Parameter.filenameAreaCoord)
#initialize map
figHeatMap, axHeatMap = plt.subplots()
#each area represents a polygon and has areaType and associated stationName
polygonDict = dict()
areaTypeDict = dict()
areaStationNameDict = dict()
xCoord = list()
yCoord = list()
stationAreaSets = dict()
stationSet = set()
#load each area
for _, octagon in areaCoords.iterrows():
xCoord.clear()
yCoord.clear()
polygonName = octagon['areaName']
polygonAreaType = octagon['areaType']
polygonStationName = octagon['stationName']
xCoord.append( octagon['x1'] )
xCoord.append( octagon['x2'] )
xCoord.append( octagon['x3'] )
xCoord.append( octagon['x4'] )
yCoord.append( octagon['y1'] )
yCoord.append( octagon['y2'] )
yCoord.append( octagon['y3'] )
yCoord.append( octagon['y4'] )
#add more polygon corners if needed
if not math.isnan(octagon['x5']):
xCoord.append( octagon['x5'] )
yCoord.append( octagon['y5'] )
if not math.isnan(octagon['x6']):
xCoord.append( octagon['x6'] )
yCoord.append( octagon['y6'] )
if not math.isnan(octagon['x7']):
xCoord.append( octagon['x7'] )
yCoord.append( octagon['y7'] )
if not math.isnan(octagon['x8']):
xCoord.append( octagon['x8'] )
yCoord.append( octagon['y8'] )
(xCoordCorr, yCoordCorr) = self.rotateMoveCoord(xCoord, yCoord, polygonStationName)
coordMatrix = np.column_stack((xCoordCorr, yCoordCorr))
#initialize all polygon with light-gray face and black edge
polygon = Polygon(coordMatrix, edgecolor='k', facecolor='0.9', linewidth='0.1')
polygonDict[polygonName] = polygon
areaTypeDict[polygonName] = polygonAreaType
areaStationNameDict[polygonName] = polygonStationName
if polygonName in self.areaTypeDict.keys():
assert(polygonAreaType == self.areaTypeDict[polygonName]), \
"Mismatch in area type for area %s: %s vs. %s" % (polygonName, self.areaTypeDict[polygonName], polygonAreaType)
if polygonName in self.areaStationNameDict.keys():
assert(polygonStationName == self.areaStationNameDict[polygonName]), \
"Mismatch in stationName for area %s: %s vs. %s" % (polygonName, self.areaStationNameDict[polygonName], polygonStationName)
#add area to stationAreaSets
if polygonStationName not in stationSet:
stationSet.add(polygonStationName)
stationAreaSets[polygonStationName] = set()
if not polygonAreaType == "obstacle":
stationAreaSets[polygonStationName].add(polygonName)
plt.gca().add_patch(polygon)
#draw contourlines
for stationName, stationContour in Parameter.contourLines.items():
#draw each line of contour
for contourLine in stationContour:
xList,yList = zip(*contourLine)
xListCorr,yListCorr = self.rotateMoveCoord(xList, yList, stationName)
plt.plot(xListCorr, yListCorr, color='k', linewidth=0.5)
plt.axis('equal')
plt.axis('off')
#get maximum density over time over all areas to scale color map
#maxAvgDens = max([max(areaDensList) for areaDensList in self.avgAreaDensityDict.values()])
#generate density heat map
for timeStep in range(0,Parameter.numTimePointsAnalysis):
for stationName in stationSet:
for areaName in stationAreaSets[stationName]:
polygon = polygonDict[areaName]
density = self.avgAreaDensityDict[areaName][timeStep]
#relDens = density/maxAvgDens
#assert(density <= maxAvgDens)
relDens = min(1,density/Parameter.plotMaxDensity)
#interpolate between white and blue to get color
polygon.set_facecolor([1-relDens, 1-relDens, 1])
figTitle = "%s (maxDens: %.2f)" % (self.param.datePointsAnalysis[timeStep], Parameter.plotMaxDensity)
plt.title(figTitle)
filename = '%s/heatmaps/densityMap-%03d' % (self.outputPath, timeStep)
self.generateFigure(filename, figHeatMap, tikz=False)
#generate LOS map
for timeStep in range(0,Parameter.numTimePointsAnalysis):
for stationName in stationSet:
for areaName in stationAreaSets[stationName]:
polygon = polygonDict[areaName]
areaType = areaTypeDict[areaName]
density = self.avgAreaDensityDict[areaName][timeStep]
serviceLevel = self.getLOS(areaType,density)
serviceColor = Parameter.losLevelColors[serviceLevel]
polygon.set_facecolor(serviceColor)
figTitle = "Level-of-service, %s" % self.param.datePointsAnalysis[timeStep]
plt.title(figTitle)
filename = '%s/heatmaps/losMap-%03d' % (self.outputPath, timeStep)
self.generateFigure(filename, figHeatMap, tikz=False)
#generate average density and LOS map for peak period
numTimeStepsPeak = int( Parameter.peakPeriodDuration // self.param.timeStepAnalysis )
stationPeakPeriod = dict()
peakDensityDict = dict()
peakLOSDict = dict()
maxPeakAvgDensity = 0
#compute average peak conditions for each station
for stationName in stationSet:
occupList = self.platformOccupationDict[stationName]
#determine peak period for each station
maxPeriodOccup = 0
periodStartStep = None
for timeStep in range(0,Parameter.numTimePointsAnalysis-numTimeStepsPeak):
periodOcc = sum(occupList[timeStep:timeStep+numTimeStepsPeak])
if periodOcc > maxPeriodOccup:
periodStartStep = timeStep
maxPeriodOccup = periodOcc
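# periodStartStep now marks the start of the busiest window of length numTimeStepsPeak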
stationPeakPeriod[stationName] = "%s to %s" % (self.param.datePointsAnalysis[periodStartStep], \
self.param.datePointsAnalysis[periodStartStep+numTimeStepsPeak])
#compute average density and LOS during peak period
for areaName in stationAreaSets[stationName]:
densityList = self.avgAreaDensityDict[areaName]
peakMeanDensity = sum(densityList[periodStartStep:periodStartStep+numTimeStepsPeak])/numTimeStepsPeak
areaType = areaTypeDict[areaName]
peakMeanLOS = self.getLOS(areaType, peakMeanDensity)
peakDensityDict[areaName] = peakMeanDensity
peakLOSDict[areaName] = peakMeanLOS
if peakMeanDensity > maxPeakAvgDensity:
maxPeakAvgDensity = peakMeanDensity
#draw peak-period density map (for all stations jointly)
figTitle = "Average peak density (max density: %.2f)\n" % maxPeakAvgDensity
for stationName in stationSet:
figTitle += "%s: %s\n" % (stationName, stationPeakPeriod[stationName])
for areaName in stationAreaSets[stationName]:
polygon = polygonDict[areaName]
density = peakDensityDict[areaName]
relDens = density/maxPeakAvgDensity
#interpolate between white and blue to get color
polygon.set_facecolor([1-relDens, 1-relDens, 1])
plt.title(figTitle)
filename = self.outputPath + '/peakDensityMap'
self.generateFigure(filename, figHeatMap, tikz=False)
#draw peak-period LOS map (for all stations jointly)
figTitle = "Average peak level-of-service\n"
for stationName in stationSet:
figTitle += "%s: %s\n" % (stationName, stationPeakPeriod[stationName])
for areaName in stationAreaSets[stationName]:
polygon = polygonDict[areaName]
serviceLevel = peakLOSDict[areaName]
serviceColor = Parameter.losLevelColors[serviceLevel]
polygon.set_facecolor(serviceColor)
plt.title(figTitle)
filename = self.outputPath + '/peakLOSMap'
self.generateFigure(filename, figHeatMap, tikz=False)
#close figure to reduce memory usage
plt.close("all")
def plotRidershipDistribution(self):
trainIDSet = set( next(iter(self.simResults)).totalRidershipLogDict.keys() )
for trainID in trainIDSet:
#flag for non-corridor trains
auxiliaryTrain = False
ridershipTotal = dict()
ridershipTracked = dict()
ridershipAuxiliary = dict()
numVehicles = None
#compute average vehicle ridership
for simOutput in self.simResults:
#initialize numVehicles if necessary
if numVehicles is None:
numVehicles = len( simOutput.totalRidershipLogDict[trainID] )
if auxiliaryTrain:
break
for stationName in Parameter.plotRidershipStations:
if stationName not in simOutput.totalRidershipLogDict[trainID][0]:
auxiliaryTrain = True
break
#initialize ridership lists
if stationName not in ridershipTotal.keys():
ridershipTotal[stationName] = [0]*numVehicles
ridershipTracked[stationName] = [0]*numVehicles
ridershipAuxiliary[stationName] = [0]*numVehicles
for vehID in range(0,numVehicles):
#add weighted estimate of current simulation run
ridershipTotal[stationName][vehID] += \
simOutput.totalRidershipLogDict[trainID][vehID][stationName]/self.numSimOutput
ridershipTracked[stationName][vehID] += \
simOutput.trackedRidershipLogDict[trainID][vehID][stationName]/self.numSimOutput
ridershipAuxiliary[stationName][vehID] += \
simOutput.auxiliaryRidershipLogDict[trainID][vehID][stationName]/self.numSimOutput
#skip train if auxiliary
if auxiliaryTrain:
continue
#generate plots
for stationName in Parameter.plotRidershipStations:
loadTrackedPass = ridershipTracked[stationName]
loadAuxPass = ridershipAuxiliary[stationName]
if numVehicles is None:
numVehicles = len( loadTrackedPass )
ind = range(numVehicles,0,-1) # the x locations for the train vehicles, arranging vehicles in direction of traveling (left to right)
width = 0.9 # the width of the bars: can also be len(x) sequence
curFig, curAx = plt.subplots()
pAux = plt.bar(ind, loadAuxPass, width)
pTrack = plt.bar(ind, loadTrackedPass, width, bottom=loadAuxPass)
plt.xlabel('train vehicle')
plt.ylabel('ridership')
plt.title('Ridership by vehicle of train %s in %s (tot = %.1f)' % (trainID, stationName, sum(loadTrackedPass)+sum(loadAuxPass)) )
plt.xticks(ind, range(1,numVehicles+1))
plt.legend((pTrack[0], pAux[0]), ('corridor (N=%.1f)' % sum(loadTrackedPass), 'auxiliary (N=%.1f)' % sum(loadAuxPass) ))
filename = '%s/ridershipDistribution_%s_%s' % (self.outputPath, trainID, stationName)
with open(filename + ".txt", "w") as text_file:
print("loadTrackedPass: %s\n\nloadAuxPass: %s" % (loadTrackedPass, loadAuxPass), file=text_file)
self.generateFigure(filename, curFig, tikz=False)
plt.close()
#close figure to reduce memory usage
plt.close("all")
def plotUtilityDistribution(self):
activityTypes = {'walking','waiting','riding'}
avgUtilityDict = dict()
stDevUtilityDict = dict()
for connection in Parameter.plotConnections:
avgUtilityDict[connection] = dict()
stDevUtilityDict[connection] = dict()
for simOutput in self.simResults:
for activity in activityTypes:
#initialize if needed
if activity not in avgUtilityDict[connection]:
avgUtilityDict[connection][activity] = 0
stDevUtilityDict[connection][activity] = 0
#add weighted estimate of current simulation run
avgUtilityDict[connection][activity] += simOutput.avgConnUtility[connection][activity]/self.numSimOutput
stDevUtilityDict[connection][activity] += simOutput.stDevConnUtility[connection][activity]/self.numSimOutput
walkCostMean = list()
waitCostMean = list()
walkAndWaitCostMean = list()
rideCostMean = list()
walkCostStDev = list()
waitCostStDev = list()
rideCostStDev = list()
connectionNames = list()
for connection in Parameter.plotConnections:
walkCostMean.append( -avgUtilityDict[connection]['walking'] )
waitCostMean.append( -avgUtilityDict[connection]['waiting'] )
walkAndWaitCostMean.append( -avgUtilityDict[connection]['walking'] - avgUtilityDict[connection]['waiting'] )
rideCostMean.append( -avgUtilityDict[connection]['riding'] )
walkCostStDev.append( stDevUtilityDict[connection]['walking'] )
waitCostStDev.append( stDevUtilityDict[connection]['waiting'] )
rideCostStDev.append( stDevUtilityDict[connection]['riding'] )
connectionNames.append("%s -- %s" % (connection[0],connection[1]))
ind = range(0,len( Parameter.plotConnections )) # the x locations for the connections
width = 0.35 # the width of the bars: can also be len(x) sequence
figBar = plt.subplots()
pWalk = plt.bar(ind, walkCostMean, width, yerr=walkCostStDev)
pWait = plt.bar(ind, waitCostMean, width, bottom=walkCostMean, yerr=waitCostStDev)
pRide = plt.bar(ind, rideCostMean, width, bottom=walkAndWaitCostMean, yerr=rideCostStDev)
plt.ylabel('cost [EUR]')
plt.title('Travel cost by OD-relation and travel activity')
plt.xticks(ind, connectionNames)
plt.legend((pRide[0], pWait[0], pWalk[0]), ('Riding', 'Waiting', 'Walking'))
#plt.show()
filename = self.outputPath + '/travelDisutilityStackedBar'
with open(filename + ".txt", "w") as text_file:
print("avgUtilityDict: %s\n\nstDevUtilityDict: %s" % (avgUtilityDict, stDevUtilityDict), file=text_file)
self.generateFigure(filename, figBar, tikz=False)
#close figure to reduce memory usage
plt.close("all")
def plotStationPlatformOccupancy(self):
self.platformOccupationDict = dict()
for stationName in Parameter.plotStations:
#sum occupation over all simulation runs
            totOccupation = [0]*Parameter.numTimePointsAnalysis
            for simOutput in self.simResults:
                totOccupation = [sum(x) for x in zip(totOccupation, simOutput.stationPlatformOccupationDict[stationName])]
            #compute mean occupation over all simulation runs
            occupPoints = [totOcc/self.numSimOutput for totOcc in totOccupation]
            #store platform occupation
            self.platformOccupationDict[stationName] = occupPoints
curFig, ax = plt.subplots()
plt.plot(self.param.timePointsAnalysis,occupPoints)
plt.ylabel('Number of pedestrians')
plt.title("Occupation on platform area in %s" % stationName)
self.configureTimeAxis(ax)
filename = '%s/platformOccupation_%s' % (self.outputPath, stationName)
self.generateFigure(filename, curFig)
#close figure to reduce memory usage
plt.close("all")
def compareEquippedLinkFlows(self):
#load flow observations
timeName = {'Time': 'timeStamp'}
asdzSens = {'asdz-PF2_escalator In': 'asdzEscIn', 'asdz-PF2_escalator Out': 'asdzEscOut', 'asdz-PF2_stairs In': 'asdzStairsIn', 'asdz-PF2_stairs Out': 'asdzStairsOut'}
utSens = {'ut-Esc_north In': 'utEscNorthIn', 'ut-Esc_north Out': 'utEscNorthOut', 'ut-Esc_south In': 'utEscSouthIn', 'ut-Esc_south Out': 'utEscSouthOut', 'ut-Stairs_north In': 'utStairsNorthIn', 'ut-Stairs_south In': 'utStairsSouthIn', 'ut-Stairs_north Out': 'utStairsNorthOut', 'ut-Stairs_south Out': 'utStairsSouthOut'}
shlSens = {'Perron 2 hellingbaan Noord (AMS_SCHI11) In': 'shlRampNorthIn', 'Perron 2 hellingbaan Noord (AMS_SCHI11) Out': 'shlRampNorthOut', 'Perron 2 roltrap Noord (AMS_SCHI10) In': 'shlEscNorthIn', 'Perron 2 roltrap Noord (AMS_SCHI10) Out': 'shlEscNorthOut', 'Perron 2 roltrap Zuid (AMS_SCHI12) In': 'shlEscStairsSouthIn', 'Perron 2 roltrap Zuid (AMS_SCHI12) Out': 'shlEscStairsSouthOut', 'Perron 2 hellingbaan Zuid (AMS_SCHI9) In': 'shlRampSouthIn', 'Perron 2 hellingbaan Zuid (AMS_SCHI9) Out': 'shlRampSouthOut'}
        dateparse = lambda x: pd.to_datetime(x, format='%d.%m.%y %H:%M')
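        # Illustrative note (assumption, not from the original source): the rename
        # dictionaries above map raw sensor column headers to short identifiers;
        # a plausible continuation reads the observation file with the custom
        # date parser and applies the renaming, e.g.
        #   flowObs = pd.read_csv(obsFile, parse_dates=['Time'], date_parser=dateparse)
        #   flowObs = flowObs.rename(columns={**timeName, **asdzSens, **utSens, **shlSens})
        # where `obsFile` is a hypothetical path to the sensor-count CSV.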
"""Database interface."""
import copy
import glob
from multimethod import multimethod
import natsort
import numpy as np
import os
import pandas as pd
import sqlite3
import typing
import cobmo.data_interface
import mesmo.config
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
def recreate_database():
"""Recreate SQLITE database from SQL schema file and CSV files in the data path / additional data paths."""
# Log message.
mesmo.utils.log_time("recreate MESMO SQLITE database")
# Find CSV files.
# - Using set instead of list to avoid duplicate entries.
data_paths = {
mesmo.config.config['paths']['data'],
*mesmo.config.config['paths']['additional_data']
}
logger.debug("MESMO data paths:\n" + '\n'.join(data_paths))
csv_files = {
csv_file
for data_path in data_paths
for csv_file in glob.glob(os.path.join(data_path, '**', '*.csv'), recursive=True)
if all(
os.path.join(folder, '') not in csv_file
for folder in ['cobmo', 'cobmo_data', *mesmo.config.config['paths']['ignore_data_folders']]
)
}
logger.debug("Found MESMO CSV files:\n" + '\n'.join(csv_files))
# Connect SQLITE database (creates file, if none).
database_connection = sqlite3.connect(mesmo.config.config['paths']['database'])
cursor = database_connection.cursor()
# Remove old data, if any.
cursor.executescript(
"""
PRAGMA writable_schema = 1;
DELETE FROM sqlite_master WHERE type IN ('table', 'index', 'trigger');
PRAGMA writable_schema = 0;
VACUUM;
"""
)
# Recreate SQLITE database schema from SQL schema file.
with open(os.path.join(mesmo.config.base_path, 'mesmo', 'data_schema.sql'), 'r') as database_schema_file:
cursor.executescript(database_schema_file.read())
database_connection.commit()
# Obtain valid table names.
valid_table_names = (
pd.read_sql("SELECT name FROM sqlite_master WHERE type='table'", database_connection).iloc[:, 0].tolist()
)
# Import CSV files into SQLITE database.
mesmo.utils.log_time("import CSV files into SQLITE database")
mesmo.utils.starmap(
import_csv_file,
zip(csv_files),
dict(
valid_table_names=valid_table_names,
database_connection=(
database_connection if not mesmo.config.config['multiprocessing']['run_parallel'] else None
)
)
)
mesmo.utils.log_time("import CSV files into SQLITE database")
# Close SQLITE connection.
cursor.close()
database_connection.close()
# Log message.
mesmo.utils.log_time("recreate MESMO SQLITE database")
# Recreate CoBMo database.
# - Using set instead of list to avoid duplicate entries.
cobmo_data_paths = {
os.path.dirname(csv_file)
for data_path in data_paths
for csv_file in glob.glob(os.path.join(data_path, '**', '*.csv'), recursive=True)
if any(
os.path.join(folder, '') in csv_file
for folder in ['cobmo', 'cobmo_data']
)
}
cobmo.config.config['paths']['additional_data'] = {
*cobmo_data_paths,
*mesmo.config.config['paths']['cobmo_additional_data'],
*cobmo.config.config['paths']['additional_data']
}
cobmo.data_interface.recreate_database()
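# Usage note: `recreate_database()` only needs to be called explicitly after the
# CSV input data has changed; `connect_database()` below recreates the database
# automatically if no database file exists yet.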
def import_csv_file(
csv_file,
valid_table_names,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain table name.
table_name = os.path.splitext(os.path.basename(csv_file))[0]
# Raise exception, if table doesn't exist.
if not (table_name in valid_table_names):
raise NameError(
f"Error loading '{csv_file}' into database, because there is no table named '{table_name}'."
)
# Load table and write to database.
try:
table = pd.read_csv(csv_file, dtype=str)
table.to_sql(
table_name,
con=database_connection,
if_exists='append',
index=False
)
except Exception as exception:
raise ImportError(f"Error loading {csv_file} into database.") from exception
def connect_database() -> sqlite3.Connection:
"""Connect to the database and return connection handle."""
# Recreate database, if no database exists.
if not os.path.isfile(mesmo.config.config['paths']['database']):
logger.debug(f"Database does not exist and is recreated at: {mesmo.config.config['paths']['database']}")
recreate_database()
# Obtain connection handle.
# - Set large timeout to allow concurrent access during parallel processing.
database_connection = sqlite3.connect(mesmo.config.config['paths']['database'], timeout=30.0)
return database_connection
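# Illustrative usage sketch (not part of the original module): open a connection
# and list the scenarios defined in the database; the `scenarios` table and the
# `scenario_name` column are the ones queried elsewhere in this module.
def _example_list_scenario_names() -> list:
    """Minimal sketch: return all scenario names stored in the database."""
    database_connection = connect_database()
    scenario_names = (
        pd.read_sql("SELECT scenario_name FROM scenarios", con=database_connection)
        .loc[:, 'scenario_name']
        .tolist()
    )
    database_connection.close()
    return scenario_names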
class ScenarioData(mesmo.utils.ObjectBase):
"""Scenario data object."""
scenario: pd.Series
timesteps: pd.Index
parameters: pd.Series
def __init__(
self,
scenario_name: str,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain parameters.
self.parameters = (
pd.read_sql(
"""
SELECT * FROM parameters
JOIN scenarios USING (parameter_set)
WHERE scenario_name = ?
""",
con=database_connection,
params=[scenario_name],
index_col='parameter_name'
).loc[:, 'parameter_value']
)
# Obtain scenario data.
scenario = (
self.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM scenarios
LEFT JOIN electric_grid_operation_limit_types USING (electric_grid_operation_limit_type)
LEFT JOIN thermal_grid_operation_limit_types USING (thermal_grid_operation_limit_type)
LEFT JOIN trust_region_setting_types USING (trust_region_setting_type)
WHERE scenario_name = ?
""",
con=database_connection,
params=[scenario_name]
))
)
# Raise error, if scenario not found.
if not (len(scenario) > 0):
raise ValueError(f"No scenario found for scenario name '{scenario_name}'.")
# Convert to Series for shorter indexing.
self.scenario = scenario.iloc[0].copy()
# Parse time definitions.
self.scenario['timestep_start'] = (
pd.Timestamp(self.scenario['timestep_start'])
)
self.scenario['timestep_end'] = (
pd.Timestamp(self.scenario['timestep_end'])
)
self.scenario['timestep_interval'] = (
pd.Timedelta(self.scenario['timestep_interval'])
)
# Instantiate timestep series.
self.timesteps = (
pd.Index(
pd.date_range(
start=self.scenario['timestep_start'],
end=self.scenario['timestep_end'],
freq=self.scenario['timestep_interval']
),
name='timestep'
)
)
def parse_parameters_column(
self,
column: np.ndarray
):
"""Parse parameters into one column of a dataframe.
- Replace strings that match `parameter_name` with `parameter_value`.
        - Other strings are directly parsed into numbers.
        - If a string neither matches a `parameter_name` nor can be parsed into a number, an error is raised.
- Expects `column` to be passed as `np.ndarray` rather than directly as `pd.Series` (for performance reasons).
"""
if column.dtype == object: # `object` represents string type.
if any(np.isin(column, self.parameters.index)):
column_values = (
self.parameters.reindex(column).values
)
column_values[pd.isnull(column_values)] = (
pd.to_numeric(column[pd.isnull(column_values)])
)
column = column_values
else:
column = pd.to_numeric(column)
# Explicitly parse to float, for consistent behavior independent of specific values.
column = column.astype(float)
return column
def parse_parameters_dataframe(
self,
dataframe: pd.DataFrame,
excluded_columns: list = None
):
"""Parse parameters into a dataframe.
- Applies `parse_parameters_column` for all string columns.
        - Columns in `excluded_columns` are not parsed. By default, this includes the `parameter_set` column and all
          columns containing `_name`, `_type`, `_id`, `connection`, `timestep` or `description`.
        """
        # Define excluded columns. By default, the `parameter_set` column and all columns containing the following
        # strings are excluded: `_name`, `_type`, `_id`, `connection`, `timestep`, `description`
if excluded_columns is None:
excluded_columns = ['parameter_set']
excluded_columns.extend(dataframe.columns[dataframe.columns.str.contains('_name')])
excluded_columns.extend(dataframe.columns[dataframe.columns.str.contains('_type')])
excluded_columns.extend(dataframe.columns[dataframe.columns.str.contains('_id')])
excluded_columns.extend(dataframe.columns[dataframe.columns.str.contains('connection')])
excluded_columns.extend(dataframe.columns[dataframe.columns.str.contains('timestep')])
excluded_columns.extend(dataframe.columns[dataframe.columns.str.contains('description')])
# Select non-excluded, string columns and apply `parse_parameters_column`.
selected_columns = (
dataframe.columns[
~dataframe.columns.isin(excluded_columns)
& (dataframe.dtypes == object) # `object` represents string type.
]
)
for column in selected_columns:
dataframe[column] = self.parse_parameters_column(dataframe[column].values)
# Apply scaling.
if 'active_power_nominal' in dataframe.columns:
dataframe.loc[:, 'active_power_nominal'] /= (
self.scenario.at['base_apparent_power']
)
if 'reactive_power_nominal' in dataframe.columns:
dataframe.loc[:, 'reactive_power_nominal'] /= (
self.scenario.at['base_apparent_power']
)
if 'resistance' in dataframe.columns:
dataframe.loc[:, 'resistance'] *= (
self.scenario.at['base_apparent_power']
/ self.scenario.at['base_voltage'] ** 2
)
if 'reactance' in dataframe.columns:
dataframe.loc[:, 'reactance'] *= (
self.scenario.at['base_apparent_power']
/ self.scenario.at['base_voltage'] ** 2
)
if 'capacitance' in dataframe.columns:
dataframe.loc[:, 'capacitance'] *= (
self.scenario.at['base_voltage'] ** 2
/ self.scenario.at['base_apparent_power']
)
if 'maximum_current' in dataframe.columns:
dataframe.loc[:, 'maximum_current'] *= (
self.scenario.at['base_voltage']
/ self.scenario.at['base_apparent_power']
)
if 'voltage' in dataframe.columns:
dataframe.loc[:, 'voltage'] /= (
self.scenario.at['base_voltage']
)
if 'apparent_power' in dataframe.columns:
dataframe.loc[:, 'apparent_power'] /= (
self.scenario.at['base_apparent_power']
)
if 'enthalpy_difference_distribution_water' in dataframe.columns:
dataframe.loc[:, 'enthalpy_difference_distribution_water'] /= (
self.scenario.at['base_thermal_power']
)
# TODO: Align enthalpy variable names (see above & below).
if 'condenser_water_enthalpy_difference' in dataframe.columns:
dataframe.loc[:, 'condenser_water_enthalpy_difference'] /= (
self.scenario.at['base_thermal_power']
)
if 'distribution_pump_efficiency' in dataframe.columns:
dataframe.loc[:, 'distribution_pump_efficiency'] *= (
self.scenario.at['base_thermal_power']
)
if 'plant_pump_efficiency' in dataframe.columns:
dataframe.loc[:, 'plant_pump_efficiency'] *= (
self.scenario.at['base_thermal_power']
)
if 'thermal_power_nominal' in dataframe.columns:
dataframe.loc[:, 'thermal_power_nominal'] /= (
self.scenario.at['base_thermal_power']
)
# If dataframe contains `in_service` column, remove all not-in-service elements.
# - This operation should be last, to avoid pandas warnings for operation on copy of dataframe.
if 'in_service' in dataframe.columns:
dataframe = dataframe.loc[dataframe.loc[:, 'in_service'] == 1, :]
return dataframe
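# Illustrative usage sketch (assumption: a scenario named 'example_scenario'
# exists in the `scenarios` table; replace with a real scenario name).
def _example_load_scenario(scenario_name: str = 'example_scenario'):
    """Minimal sketch: load scenario data and return its timestep index."""
    scenario_data = ScenarioData(scenario_name)
    # Parameter placeholders in the scenario tables have already been replaced
    # with their numeric values via `parse_parameters_dataframe`.
    return scenario_data.timesteps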
class DERData(mesmo.utils.ObjectBase):
"""DER data object."""
scenario_data: ScenarioData
ders: pd.DataFrame
der_definitions: typing.Dict[str, pd.DataFrame]
@multimethod
def __init__(
self,
scenario_name: str,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain scenario data.
self.scenario_data = ScenarioData(scenario_name)
# Obtain DERs.
# - Obtain DERs for electric grid / thermal grid separately and perform full outer join via `pandas.merge()`,
# due to SQLITE missing full outer join syntax.
ders = (
pd.merge(
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_ders
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
)),
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM thermal_grid_ders
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
)),
how='outer',
on=['der_name', 'der_type', 'der_model_name'],
suffixes=('_electric_grid', '_thermal_grid')
)
)
der_models = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM der_models
WHERE (der_type, der_model_name) IN (
SELECT der_type, der_model_name
FROM electric_grid_ders
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
)
OR (der_type, der_model_name) IN (
SELECT der_type, der_model_name
FROM thermal_grid_ders
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM scenarios
WHERE scenario_name = ?
)
)
""",
con=database_connection,
params=[
scenario_name,
scenario_name
]
))
)
self.__init__(
scenario_name,
ders,
der_models,
database_connection
)
@multimethod
def __init__(
self,
scenario_name: str,
der_type: str,
der_model_name: str,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain scenario data.
self.scenario_data = ScenarioData(scenario_name)
# Obtain DERs.
ders = (
pd.DataFrame(
{
'electric_grid_name': None,
'thermal_grid_name': None,
'der_name': der_model_name,
'der_type': der_type,
'der_model_name': der_model_name,
'active_power_nominal': None,
'reactive_power_nominal': None,
'thermal_power_nominal': None
},
index=[0]
)
)
der_models = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM der_models
WHERE der_type = ?
AND der_model_name = ?
""",
con=database_connection,
params=[
der_type,
der_model_name
]
))
)
self.__init__(
scenario_name,
ders,
der_models,
database_connection
)
@multimethod
def __init__(
self,
scenario_name: str,
ders: pd.DataFrame,
der_models: pd.DataFrame,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain DERs.
self.ders = (
pd.merge(
ders,
der_models,
how='left',
on=['der_type', 'der_model_name'],
)
)
self.ders.index = self.ders['der_name']
self.ders = self.ders.reindex(index=natsort.natsorted(self.ders.index))
# Raise error, if any undefined DER models.
# - That is: `der_model_name` is in `electric_grid_ders` or `thermal_grid_ders`, but not in `der_models`.
# - Except for `flexible_building` models, which are defined through CoBMo.
# TODO: Output DER model names.
if (
~ders.loc[:, 'der_model_name'].isin(der_models.loc[:, 'der_model_name'])
& ~ders.loc[:, 'der_type'].isin(['flexible_building']) # CoBMo models
).any():
raise ValueError(
"Some `der_model_name` in `electric_grid_ders` or `thermal_grid_ders` are not defined in `der_models`."
)
# Obtain unique `definition_type` / `definition_name`.
der_definitions_unique = self.ders.loc[:, ['definition_type', 'definition_name']].drop_duplicates()
der_definitions_unique = der_definitions_unique.dropna(subset=['definition_type'])
# Instantiate DER definitions dictionary.
self.der_definitions = dict.fromkeys(pd.MultiIndex.from_frame(der_definitions_unique))
# Append `definition_index` column to DERs, for more convenient indexing into DER definitions.
self.ders.loc[:, 'definition_index'] = (
pd.MultiIndex.from_frame(self.ders.loc[:, ['definition_type', 'definition_name']]).to_numpy()
)
# Instantiate dict for additional DER definitions, e.g. from `flexible_ev_charger`.
additional_der_definitions = dict()
# Load DER definitions, first for special definition types, e.g. `cooling_plant`, `flexible_ev_charger`.
for definition_index in self.der_definitions:
if definition_index[0] == 'cooling_plant':
self.der_definitions[definition_index] = (
pd.concat([
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM thermal_grids
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM main.scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
)).iloc[0],
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM der_cooling_plants
WHERE definition_name = ?
""",
con=database_connection,
params=[definition_index[1]]
)).iloc[0]
]).drop('thermal_grid_name') # Remove `thermal_grid_name` to avoid duplicate index in `der_models`.
)
elif definition_index[0] == 'flexible_ev_charger':
self.der_definitions[definition_index] = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM der_ev_chargers
WHERE definition_name = ?
""",
con=database_connection,
params=[definition_index[1]]
)).iloc[0]
)
# Append `definition_index`, for more convenient indexing into DER definitions.
# - Add `accumulative` flag to ensure correct interpolation / resampling behavior.
self.der_definitions[definition_index].at['nominal_charging_definition_index'] = (
self.der_definitions[definition_index].at['nominal_charging_definition_type'],
self.der_definitions[definition_index].at['nominal_charging_definition_name']
)
self.der_definitions[definition_index].at['maximum_charging_definition_index'] = (
self.der_definitions[definition_index].at['maximum_charging_definition_type'],
self.der_definitions[definition_index].at['maximum_charging_definition_name']
)
self.der_definitions[definition_index].at['maximum_discharging_definition_index'] = (
self.der_definitions[definition_index].at['maximum_discharging_definition_type'],
self.der_definitions[definition_index].at['maximum_discharging_definition_name']
)
self.der_definitions[definition_index].at['maximum_energy_definition_index'] = (
self.der_definitions[definition_index].at['maximum_energy_definition_type'],
self.der_definitions[definition_index].at['maximum_energy_definition_name']
)
self.der_definitions[definition_index].at['departing_energy_definition_index'] = (
self.der_definitions[definition_index].at['departing_energy_definition_type'] + '_accumulative',
self.der_definitions[definition_index].at['departing_energy_definition_name']
)
# Append arrival / occupancy timeseries / schedule to additional definitions.
additional_der_definitions.update({
self.der_definitions[definition_index].at['nominal_charging_definition_index']: None,
self.der_definitions[definition_index].at['maximum_charging_definition_index']: None,
self.der_definitions[definition_index].at['maximum_discharging_definition_index']: None,
self.der_definitions[definition_index].at['maximum_energy_definition_index']: None,
self.der_definitions[definition_index].at['departing_energy_definition_index']: None
})
# Append additional DER definitions.
self.der_definitions.update(additional_der_definitions)
# Obtain required timestep frequency for schedule resampling / interpolation.
# - Higher frequency is only used when required. This aims to reduce computational burden.
if (
self.scenario_data.scenario.at['timestep_interval']
- self.scenario_data.scenario.at['timestep_interval'].floor('min')
).seconds != 0:
timestep_frequency = 's'
elif (
self.scenario_data.scenario.at['timestep_interval']
- self.scenario_data.scenario.at['timestep_interval'].floor('h')
).seconds != 0:
timestep_frequency = 'min'
else:
timestep_frequency = 'h'
# Load DER definitions, for timeseries / schedule definitions, for each `definition_name`.
if len(self.der_definitions) > 0:
mesmo.utils.log_time('load DER timeseries / schedule definitions')
der_definitions = (
mesmo.utils.starmap(
self.load_der_timeseries_schedules,
zip(
mesmo.utils.chunk_dict(self.der_definitions)
),
dict(
timestep_frequency=timestep_frequency,
timesteps=self.scenario_data.timesteps
)
)
)
for chunk in der_definitions:
self.der_definitions.update(chunk)
mesmo.utils.log_time('load DER timeseries / schedule definitions')
@staticmethod
def load_der_timeseries_schedules(
der_definitions: dict,
timestep_frequency: str,
timesteps
):
timestep_start = timesteps[0]
timestep_end = timesteps[-1]
timestep_interval = timesteps[1] - timesteps[0]
database_connection = connect_database()
der_timeseries_all = (
pd.read_sql(
f"""
SELECT * FROM der_timeseries
WHERE definition_name IN ({','.join(['?'] * len(der_definitions))})
AND time between ? AND ?
""",
con=database_connection,
params=[
*pd.MultiIndex.from_tuples(der_definitions.keys()).get_level_values(1),
timestep_start.strftime('%Y-%m-%dT%H:%M:%S'),
timestep_end.strftime('%Y-%m-%dT%H:%M:%S')
],
parse_dates=['time'],
index_col=['time']
)
)
der_schedules_all = (
pd.read_sql(
f"""
SELECT * FROM der_schedules
WHERE definition_name IN ({','.join(['?'] * len(der_definitions))})
""",
con=database_connection,
params=pd.MultiIndex.from_tuples(der_definitions.keys()).get_level_values(1),
index_col=['time_period']
)
)
for definition_index in der_definitions:
if 'timeseries' in definition_index[0]:
der_timeseries = (
der_timeseries_all.loc[der_timeseries_all.loc[:, 'definition_name'] == definition_index[1], :]
)
if not (len(der_timeseries) > 0):
raise ValueError(f"No DER time series definition found for definition name '{definition_index[1]}'.")
# Resample / interpolate / fill values.
if 'accumulative' in definition_index[0]:
# Resample to scenario timestep interval, using sum to aggregate. Missing values are filled with 0.
der_timeseries = (
der_timeseries.resample(
timestep_interval,
origin=timestep_start
).sum()
)
der_timeseries = (
der_timeseries.reindex(timesteps)
)
# TODO: This overwrites any missing values. No warning is raised.
der_timeseries = der_timeseries.fillna(0.0)
else:
# Resample to scenario timestep interval, using mean to aggregate. Missing values are interpolated.
der_timeseries = (
der_timeseries.resample(
timestep_interval,
origin=timestep_start
).mean()
)
der_timeseries = (
der_timeseries.reindex(timesteps)
)
der_timeseries = der_timeseries.interpolate(method='linear')
# Backward / forward fill up to 1h to handle edge definition gaps.
der_timeseries = (
der_timeseries.bfill(
limit=int(pd.to_timedelta('1h') / timestep_interval)
).ffill(
limit=int(pd.to_timedelta('1h') / timestep_interval)
)
)
# If any NaN values, display warning and fill missing values.
if der_timeseries.isnull().any().any():
logger.warning(
f"Missing values in DER timeseries definition for '{definition_index[1]}'."
f" Please check if appropriate timestep_start/timestep_end are defined."
f" Missing values are filled with 0."
)
# Fill with 0.
der_timeseries = (
der_timeseries.fillna(0.0)
)
der_definitions[definition_index] = der_timeseries
elif 'schedule' in definition_index[0]:
der_schedule = der_schedules_all.loc[der_schedules_all.loc[:, 'definition_name'] == definition_index[1], :]
if not (len(der_schedule) > 0):
raise ValueError(f"No DER schedule definition found for definition name '{definition_index[1]}'.")
# Show warning, if `time_period` does not start with '01T00:00'.
if der_schedule.index[0] != '01T00:00':
logger.warning(
f"First time period is '{der_schedule.index[0]}' in DER schedule with definition name "
f"'{definition_index[1]}'. Schedules should start with time period '01T00:00'. "
f"Please also check if using correct time period format: 'ddTHH:MM'"
)
# Parse time period index.
# - '2001-01-...' is chosen as reference timestep, because '2001-01-01' falls on a Monday.
der_schedule.index = pd.to_datetime('2001-01-' + der_schedule.index)
# Obtain complete schedule for all weekdays.
der_schedule_complete = []
for day in range(1, 8):
if day in der_schedule.index.day.unique():
der_schedule_complete.append(
der_schedule.loc[der_schedule.index.day == day, :]
)
else:
der_schedule_previous = der_schedule_complete[-1].copy()
der_schedule_previous.index += pd.Timedelta('1 day')
der_schedule_complete.append(der_schedule_previous)
der_schedule_complete = pd.concat(der_schedule_complete)
# Resample / interpolate / fill values to obtain complete schedule.
if 'accumulative' in definition_index[0]:
# Resample to scenario timestep interval, using sum to aggregate. Missing values are filled with 0.
der_schedule_complete = (
der_schedule_complete.resample(timestep_interval).sum()
)
der_schedule_complete = (
der_schedule_complete.reindex(
pd.date_range(
start='2001-01-01T00:00',
end='2001-01-07T23:59',
freq=timestep_interval
)
)
)
der_schedule_complete = der_schedule_complete.fillna(0.0)
                    # Resample to required timestep frequency, forward-filling intermediate values.
# - Ensures that the correct value is used when reindexing to obtain the full timeseries,
# independent of any shift between timeseries and schedule timesteps.
der_schedule_complete = (
der_schedule_complete.resample(timestep_frequency).mean()
)
der_schedule_complete = (
der_schedule_complete.reindex(
pd.date_range(
start='2001-01-01T00:00',
end='2001-01-07T23:59',
freq=timestep_frequency
)
)
)
der_schedule_complete = (
der_schedule_complete.ffill()
)
else:
# Resample to required timestep frequency, using mean to aggregate. Missing values are interpolated.
der_schedule_complete = (
der_schedule_complete.resample(timestep_frequency).mean()
)
der_schedule_complete = (
der_schedule_complete.reindex(
pd.date_range(
start='2001-01-01T00:00',
end='2001-01-07T23:59',
freq=timestep_frequency
)
)
)
der_schedule_complete = der_schedule_complete.interpolate(method='linear')
# Forward fill to handle definition gap at the end of the schedule.
der_schedule_complete = (
der_schedule_complete.ffill()
)
# Reindex / fill schedule for given timesteps.
der_schedule_complete.index = (
pd.MultiIndex.from_arrays([
der_schedule_complete.index.weekday,
der_schedule_complete.index.hour
] + (
[der_schedule_complete.index.minute] if timestep_frequency in ['s', 'min'] else []
) + (
[der_schedule_complete.index.second] if timestep_frequency in ['s'] else []
))
)
der_schedule = (
pd.DataFrame(
index=pd.MultiIndex.from_arrays([
timesteps.weekday,
timesteps.hour
] + (
[timesteps.minute] if timestep_frequency in ['s', 'min'] else []
) + (
[timesteps.second] if timestep_frequency in ['s'] else []
)),
columns=['value']
)
)
der_schedule = (
der_schedule_complete.reindex(der_schedule.index)
)
der_schedule.index = timesteps
der_definitions[definition_index] = der_schedule
return der_definitions
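# Illustrative sketch (not part of the original module) of the reindexing trick
# used in `load_der_timeseries_schedules`: a weekly schedule keyed by
# (weekday, hour) is broadcast onto arbitrary timesteps by reindexing on a
# matching MultiIndex.
def _example_schedule_to_timeseries():
    """Minimal sketch: map a weekly (weekday, hour) schedule onto timesteps."""
    schedule = pd.DataFrame(
        {'value': [0.5, 1.0]},
        index=pd.MultiIndex.from_tuples([(0, 0), (0, 12)])  # Monday 00:00 / 12:00.
    )
    timesteps = pd.date_range('2001-01-01T00:00', '2001-01-01T23:00', freq='12h')
    timeseries = schedule.reindex(
        pd.MultiIndex.from_arrays([timesteps.weekday, timesteps.hour])
    )
    timeseries.index = timesteps
    return timeseries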
class ElectricGridData(mesmo.utils.ObjectBase):
"""Electric grid data object."""
scenario_data: ScenarioData
electric_grid: pd.DataFrame
electric_grid_nodes: pd.DataFrame
electric_grid_ders: pd.DataFrame
electric_grid_lines: pd.DataFrame
electric_grid_line_types: pd.DataFrame
electric_grid_line_types_overhead: pd.DataFrame
electric_grid_line_types_overhead_conductors: pd.DataFrame
electric_grid_line_types_matrices: pd.DataFrame
electric_grid_transformers: pd.DataFrame
def __init__(
self,
scenario_name: str,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain scenario data.
self.scenario_data = ScenarioData(scenario_name)
# Obtain electric grid data.
self.electric_grid = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grids
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
)).iloc[0]
)
self.electric_grid_nodes = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_nodes
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_nodes.index = self.electric_grid_nodes['node_name']
self.electric_grid_nodes = (
self.electric_grid_nodes.reindex(index=natsort.natsorted(self.electric_grid_nodes.index))
)
self.electric_grid_ders = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_ders
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_ders.index = self.electric_grid_ders['der_name']
self.electric_grid_ders = (
self.electric_grid_ders.reindex(index=natsort.natsorted(self.electric_grid_ders.index))
)
self.electric_grid_lines = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_lines
JOIN electric_grid_line_types USING (line_type)
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_lines.index = self.electric_grid_lines['line_name']
self.electric_grid_lines = (
self.electric_grid_lines.reindex(index=natsort.natsorted(self.electric_grid_lines.index))
)
self.electric_grid_line_types = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_line_types
WHERE line_type IN (
SELECT line_type FROM electric_grid_lines
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_line_types.index = self.electric_grid_line_types['line_type']
self.electric_grid_line_types_overhead = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_line_types_overhead
WHERE line_type IN (
SELECT line_type FROM electric_grid_line_types
WHERE line_type IN (
SELECT line_type FROM electric_grid_lines
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
)
AND definition_type = 'overhead'
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_line_types_overhead.index = self.electric_grid_line_types_overhead['line_type']
self.electric_grid_line_types_overhead_conductors = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_line_types_overhead_conductors
""",
con=database_connection
))
)
self.electric_grid_line_types_overhead_conductors.index = self.electric_grid_line_types_overhead_conductors['conductor_id']
self.electric_grid_line_types_matrices = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_line_types_matrices
WHERE line_type IN (
SELECT line_type FROM electric_grid_line_types
WHERE line_type IN (
SELECT line_type FROM electric_grid_lines
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
)
AND definition_type = 'matrix'
)
ORDER BY line_type ASC, row ASC, col ASC
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_transformers = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM electric_grid_transformers
LEFT JOIN electric_grid_transformer_types USING (transformer_type)
WHERE electric_grid_name = (
SELECT electric_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.electric_grid_transformers.index = self.electric_grid_transformers['transformer_name']
self.electric_grid_transformers = (
self.electric_grid_transformers.reindex(index=natsort.natsorted(self.electric_grid_transformers.index))
)
class ThermalGridData(mesmo.utils.ObjectBase):
"""Thermal grid data object."""
scenario_data: ScenarioData
thermal_grid: pd.DataFrame
thermal_grid_nodes: pd.DataFrame
thermal_grid_ders: pd.DataFrame
thermal_grid_lines: pd.DataFrame
der_data: DERData
def __init__(
self,
scenario_name: str,
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain scenario data.
self.scenario_data = ScenarioData(scenario_name)
self.thermal_grid = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM thermal_grids
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
)).iloc[0]
)
self.thermal_grid_nodes = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM thermal_grid_nodes
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.thermal_grid_nodes.index = self.thermal_grid_nodes['node_name']
self.thermal_grid_nodes = (
self.thermal_grid_nodes.reindex(index=natsort.natsorted(self.thermal_grid_nodes.index))
)
self.thermal_grid_ders = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM thermal_grid_ders
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.thermal_grid_ders.index = self.thermal_grid_ders['der_name']
self.thermal_grid_ders = (
self.thermal_grid_ders.reindex(index=natsort.natsorted(self.thermal_grid_ders.index))
)
self.thermal_grid_lines = (
self.scenario_data.parse_parameters_dataframe(pd.read_sql(
"""
SELECT * FROM thermal_grid_lines
JOIN thermal_grid_line_types USING (line_type)
WHERE thermal_grid_name = (
SELECT thermal_grid_name FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[scenario_name]
))
)
self.thermal_grid_lines.index = self.thermal_grid_lines['line_name']
self.thermal_grid_lines = (
self.thermal_grid_lines.reindex(index=natsort.natsorted(self.thermal_grid_lines.index))
)
# Obtain DER data.
self.der_data = (
DERData(
scenario_name,
self.thermal_grid.at['source_der_type'],
self.thermal_grid.at['source_der_model_name'],
database_connection
)
)
class PriceData(mesmo.utils.ObjectBase):
"""Price data object."""
price_sensitivity_coefficient: float
price_timeseries: pd.DataFrame
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain DER data.
der_data = DERData(scenario_name)
self.__init__(
scenario_name,
der_data,
**kwargs
)
@multimethod
def __init__(
self,
scenario_name: str,
der_data: DERData,
price_type='',
database_connection=None
):
# Obtain database connection.
if database_connection is None:
database_connection = connect_database()
# Obtain scenario data.
scenario_data = der_data.scenario_data
# Obtain price type.
price_type = scenario_data.scenario.at['price_type'] if price_type == '' else price_type
# Obtain price sensitivity coefficient.
self.price_sensitivity_coefficient = scenario_data.scenario.at['price_sensitivity_coefficient']
# Obtain price timeseries.
if price_type is None:
price_timeseries = (
pd.Series(
1.0,
index=scenario_data.timesteps,
name='price_value'
)
)
else:
price_timeseries = (
pd.read_sql(
"""
SELECT * FROM price_timeseries
WHERE price_type = ?
AND time >= (
SELECT timestep_start FROM scenarios
WHERE scenario_name = ?
)
AND time <= (
SELECT timestep_end FROM scenarios
WHERE scenario_name = ?
)
""",
con=database_connection,
params=[
price_type,
scenario_name,
scenario_name
],
parse_dates=['time'],
index_col=['time']
).reindex(
scenario_data.timesteps
).interpolate(
'ffill'
).bfill( # Backward fill to handle edge definition gaps.
limit=int(pd.to_timedelta('1h') / scenario_data.scenario['timestep_interval'])
).ffill( # Forward fill to handle edge definition gaps.
limit=int(pd.to_timedelta('1h') / scenario_data.scenario['timestep_interval'])
)
).loc[:, 'price_value']
# Obtain price timeseries for each DER.
prices = (
pd.MultiIndex.from_frame(pd.concat([
pd.DataFrame({
'commodity_type': 'active_power',
'der_type': ['source'],
'der_name': ['source']
}) if pd.notnull(scenario_data.scenario.at['electric_grid_name']) else None,
pd.DataFrame({
'commodity_type': 'active_power',
'der_type': der_data.ders.loc[pd.notnull(der_data.ders.loc[:, 'electric_grid_name']), 'der_type'],
'der_name': der_data.ders.loc[pd.notnull(der_data.ders.loc[:, 'electric_grid_name']), 'der_name']
}),
pd.DataFrame({
'commodity_type': 'reactive_power',
'der_type': ['source'],
'der_name': ['source']
}) if pd.notnull(scenario_data.scenario.at['electric_grid_name']) else None,
pd.DataFrame({
'commodity_type': 'reactive_power',
'der_type': der_data.ders.loc[pd.notnull(der_data.ders.loc[:, 'electric_grid_name']), 'der_type'],
'der_name': der_data.ders.loc[pd.notnull(der_data.ders.loc[:, 'electric_grid_name']), 'der_name']
}),
pd.DataFrame({
'commodity_type': 'thermal_power',
'der_type': ['source'],
'der_name': ['source']
                }) if pd.notnull(scenario_data.scenario.at['thermal_grid_name']) else None,
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from blocktorch.pipelines.components.transformers.preprocessing import (
DropRowsTransformer,
)
def test_drop_rows_transformer_init():
drop_rows_transformer = DropRowsTransformer()
assert drop_rows_transformer.indices_to_drop is None
drop_rows_transformer = DropRowsTransformer(indices_to_drop=[0, 1])
assert drop_rows_transformer.indices_to_drop == [0, 1]
def test_drop_rows_transformer_init_with_duplicate_indices():
with pytest.raises(ValueError, match="All input indices must be unique."):
DropRowsTransformer(indices_to_drop=[0, 0])
def test_drop_rows_transformer_fit_transform():
X = pd.DataFrame({"a column": [1, 2, 3], "another col": [4, 5, 6]})
X_expected = X.copy()
drop_rows_transformer_none = DropRowsTransformer()
drop_rows_transformer_none.fit(X)
transformed = drop_rows_transformer_none.transform(X)
assert_frame_equal(X, transformed[0])
assert transformed[1] is None
indices_to_drop = [1, 2]
X_expected = pd.DataFrame({"a column": [1], "another col": [4]})
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
drop_rows_transformer.fit(X)
transformed = drop_rows_transformer.transform(X)
assert_frame_equal(X_expected, transformed[0])
assert transformed[1] is None
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
fit_transformed = drop_rows_transformer.fit_transform(X)
assert_frame_equal(fit_transformed[0], transformed[0])
assert fit_transformed[1] is None
def test_drop_rows_transformer_fit_transform_with_empty_indices_to_drop():
X = pd.DataFrame({"a column": [1, 2, 3], "another col": [4, 5, 6]})
y = pd.Series([1, 0, 1])
drop_rows_transformer = DropRowsTransformer(indices_to_drop=[])
fit_transformed = drop_rows_transformer.fit_transform(X)
assert_frame_equal(X, fit_transformed[0])
assert fit_transformed[1] is None
fit_transformed = drop_rows_transformer.fit_transform(X, y)
assert_frame_equal(X, fit_transformed[0])
assert_series_equal(y, fit_transformed[1])
def test_drop_rows_transformer_fit_transform_with_target():
X = pd.DataFrame({"a column": [1, 2, 3], "another col": [4, 5, 6]})
y = pd.Series([1, 0, 1])
X_expected = pd.DataFrame({"a column": [1], "another col": [4]})
y_expected = pd.Series([1])
drop_rows_transformer_none = DropRowsTransformer()
drop_rows_transformer_none.fit(X, y)
transformed = drop_rows_transformer_none.transform(X, y)
assert_frame_equal(X, transformed[0])
assert_series_equal(y, transformed[1])
indices_to_drop = [1, 2]
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
drop_rows_transformer.fit(X, y)
transformed = drop_rows_transformer.transform(X, y)
assert_frame_equal(X_expected, transformed[0])
assert_series_equal(y_expected, transformed[1])
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
fit_transformed = drop_rows_transformer.fit_transform(X, y)
assert_frame_equal(fit_transformed[0], transformed[0])
assert_series_equal(y_expected, fit_transformed[1])
def test_drop_rows_transformer_index_not_in_input():
X = pd.DataFrame({"numerical col": [1, 2]})
y = pd.Series([0, 1], index=["a", "b"])
drop_rows_transformer = DropRowsTransformer(indices_to_drop=[100, 1])
with pytest.raises(ValueError, match="do not exist in input features"):
drop_rows_transformer.fit(X)
drop_rows_transformer = DropRowsTransformer(indices_to_drop=[0])
with pytest.raises(ValueError, match="do not exist in input target"):
drop_rows_transformer.fit(X, y)
def test_drop_rows_transformer_nonnumeric_index():
X = pd.DataFrame({"numeric": [1, 2, 3], "cat": ["a", "b", "c"]})
index = pd.Series(["i", "n", "d"])
X = X.set_index(index)
indices_to_drop = ["i", "n"]
X_expected = X.copy()
X_expected.ww.init()
X_expected = X_expected.drop(indices_to_drop, axis=0)
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
drop_rows_transformer.fit(X)
transformed = drop_rows_transformer.transform(X)
    assert_frame_equal(X_expected, transformed[0])
import pandas as pd
from pandas_profiling import ProfileReport
import numpy as np
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
import math
import plotly.io as pio
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import squarify
import EDA_plots as plot
import warnings
# set options
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.plotting.backend = "plotly"
pio.renderers.default = 'iframe' # or 'notebook' or 'colab'
#sns.set_style('white')
sns.set_context("talk")
##########
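# Note (assumption, not from the original source): the helpers below expect a
# leaderboard DataFrame with the columns used throughout this module, e.g.
# 'gender', 'age', 'division', 'overallrank', 'competitorid', per-workout
# 'score_i' / 'scaled_i' / 'rank_i' and derived 'wi_reps_total' /
# 'wi_full_rounds_completed' columns. A plausible entry point:
#   df = pd.read_csv('open_2019_leaderboard.csv')  # placeholder file name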
def gender_distribution(data):
plt.figure(figsize=(7,4))
g = sns.countplot(
x ='gender',
data = data,
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
ylabels = ['{:,.0f}'.format(y) + 'k' for y in g.get_yticks()/1000]
g.set_yticklabels(ylabels)
g.set_title('Gender Distribution')
g.set_xlabel('')
g.set_ylabel('')
plt.show()
def age_distribution(data):
plt.figure(figsize=(10,4))
g = sns.histplot(
data=data,
x='age',
hue='gender',
bins=24,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
g.set_title('Age Distributions (stacked)')
g.set_ylabel('')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in g.get_yticks()/1000]
g.set_yticklabels(ylabels)
g.set_xlabel('age [years]')
plt.show()
def workout_participation_all(df):
rx,sc = [],[]
for i in [1,2,3,4,5]:
rx.append(df[(df[f'scaled_{i}']==0) & (df[f'score_{i}']!=0)]['competitorid'].count())
sc.append(df[(df[f'scaled_{i}']==1) & (df[f'score_{i}']!=0)]['competitorid'].count())
tot = np.add(rx,sc)
plt.figure(figsize=(15,6))
idx = ['19.1','19.2','19.3','19.4','19.5']
bar1 = sns.barplot(
x=idx,
y=tot,
color='lightblue',
edgecolor='black'
)
bar2 = sns.barplot(
x=idx,
y=rx,
color='darkblue',
edgecolor='black'
)
bar1.set_title('Workout Participation (M & F)')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in bar1.get_yticks()/1000]
bar1.set_yticklabels(ylabels)
bar1.set_xlabel('Open Workout')
top_bar = mpatches.Patch(color='lightblue', label='Scaled')
bottom_bar = mpatches.Patch(color='darkblue', label='Rx')
plt.legend(handles=[top_bar, bottom_bar])
plt.show()
def workout_participation_male(df):
rx,sc = [],[]
df_m = df[df['gender']=='M']
for i in [1,2,3,4,5]:
rx.append(df_m[(df_m[f'scaled_{i}']==0) & (df_m[f'score_{i}']!=0)]['competitorid'].count())
sc.append(df_m[(df_m[f'scaled_{i}']==1) & (df_m[f'score_{i}']!=0)]['competitorid'].count())
tot = np.add(rx,sc)
plt.figure(figsize=(15,6))
idx = ['19.1','19.2','19.3','19.4','19.5']
bar1 = sns.barplot(
x=idx,
y=tot,
color='lightblue',
edgecolor='black'
)
bar2 = sns.barplot(
x=idx,
y=rx,
color='gold',
edgecolor='black'
)
bar1.set_title('Workout Participation (Men)')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in bar1.get_yticks()/1000]
bar1.set_yticklabels(ylabels)
bar1.set_xlabel('Open Workout')
top_bar = mpatches.Patch(color='lightblue', label='Scaled')
bottom_bar = mpatches.Patch(color='gold', label='Rx')
plt.legend(handles=[top_bar, bottom_bar])
plt.show()
def workout_participation_female(df):
rx,sc = [],[]
df_f = df[df['gender']=='F']
for i in [1,2,3,4,5]:
rx.append(df_f[(df_f[f'scaled_{i}']==0) & (df_f[f'score_{i}']!=0)]['competitorid'].count())
sc.append(df_f[(df_f[f'scaled_{i}']==1) & (df_f[f'score_{i}']!=0)]['competitorid'].count())
tot = np.add(rx,sc)
plt.figure(figsize=(15,6))
idx = ['19.1','19.2','19.3','19.4','19.5']
bar1 = sns.barplot(
x=idx,
y=tot,
color='lightblue',
edgecolor='black'
)
bar2 = sns.barplot(
x=idx,
y=rx,
color='limegreen',
edgecolor='black'
)
bar1.set_title('Workout Participation (Women)')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in bar1.get_yticks()/1000]
bar1.set_yticklabels(ylabels)
bar1.set_xlabel('Open Workout')
top_bar = mpatches.Patch(color='lightblue', label='Scaled')
bottom_bar = mpatches.Patch(color='limegreen', label='Rx')
plt.legend(handles=[top_bar, bottom_bar])
plt.show()
def workout_balance_1(df):
fig, axes = plt.subplots(1, 3, figsize=(30, 7))
a = sns.histplot(
ax=axes[0],
data=df[df['scaled_1']==0],
x='w1_reps_total',
hue='gender',
bins=30,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
a.set_title('19.1 | Rx')
a.set_ylabel('')
a.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in a.get_yticks()/1000]
a.set_yticklabels(ylabels)
b = sns.histplot(
ax=axes[1],
data=df[df['scaled_1']==1],
x='w1_reps_total',
hue='gender',
bins=30,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
b.set_title('19.1 | Scaled')
b.set_ylabel('')
b.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in b.get_yticks()/1000]
b.set_yticklabels(ylabels)
df_ta = df[(df['division']=='Men')|(df['division']=='Women')]
ta_1 = int(0.01*df_ta.overallrank.max())
df_ta = df_ta[df_ta['overallrank']<=ta_1]
c = sns.histplot(
ax=axes[2],
data=df_ta,
x='w1_reps_total',
hue='gender',
bins=10,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
c.set_title('19.1 | Top 1%')
c.set_ylabel('')
c.set_xlabel('total reps');
def workout_balance_2_rx(df):
fig = go.Figure()
df_m_rx = df[(df['gender']=='M') & (df['scaled_2']==0) & (df['score_2']!=0)].groupby(by='w2_full_rounds_completed').count()
df_m_rx_list = list(df_m_rx.competitorid)
fig.add_trace(go.Funnel(
name = 'Men',
y = ['start', ' 8 min', '12 min', '16 min', '20 min'],
x = [
sum(df_m_rx_list),
sum(df_m_rx_list)-sum(df_m_rx_list[:1]),
sum(df_m_rx_list)-sum(df_m_rx_list[:2]),
sum(df_m_rx_list)-sum(df_m_rx_list[:3]),
sum(df_m_rx_list)-sum(df_m_rx_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#86CE00"}))
df_f_rx = df[(df['gender']=='F') & (df['scaled_2']==0) &(df['score_2']!=0)].groupby(by='w2_full_rounds_completed').count()
df_f_rx_list = list(df_f_rx.competitorid)
fig.add_trace(go.Funnel(
name = 'Women',
y = ['start', ' 8 min', '12 min', '16 min', '20 min'],
x = [
sum(df_f_rx_list),
sum(df_f_rx_list)-sum(df_f_rx_list[:1]),
sum(df_f_rx_list)-sum(df_f_rx_list[:2]),
sum(df_f_rx_list)-sum(df_f_rx_list[:3]),
sum(df_f_rx_list)-sum(df_f_rx_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#FBE426"}))
fig.show()
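# Note: each funnel entry above is the count of athletes still in the workout
# after a checkpoint; it is obtained by subtracting the cumulative number of
# athletes whose score stopped in earlier rounds (per-round counts from the
# groupby on 'w2_full_rounds_completed') from the overall total.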
def workout_balance_2_sc(df):
fig = go.Figure()
df_m_sc = df[(df['gender']=='M') & (df['scaled_2']==1) & (df['score_2']!=0)].groupby(by='w2_full_rounds_completed').count()
df_m_sc_list = list(df_m_sc.competitorid)
fig.add_trace(go.Funnel(
name = 'Men',
y = ['start', ' 8 min', '12 min', '16 min', '20 min'],
x = [
sum(df_m_sc_list),
sum(df_m_sc_list)-sum(df_m_sc_list[:1]),
sum(df_m_sc_list)-sum(df_m_sc_list[:2]),
sum(df_m_sc_list)-sum(df_m_sc_list[:3]),
sum(df_m_sc_list)-sum(df_m_sc_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#86CE00"}))
df_f_sc = df[(df['gender']=='F') & (df['scaled_2']==1) & (df['score_2']!=0)].groupby(by='w2_full_rounds_completed').count()
df_f_sc_list = list(df_f_sc.competitorid)
fig.add_trace(go.Funnel(
name = 'Women',
y = ['start', ' 8 min', '12 min', '16 min', '20 min'],
x = [
sum(df_f_sc_list),
sum(df_f_sc_list)-sum(df_f_sc_list[:1]),
sum(df_f_sc_list)-sum(df_f_sc_list[:2]),
sum(df_f_sc_list)-sum(df_f_sc_list[:3]),
sum(df_f_sc_list)-sum(df_f_sc_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#FBE426"}))
fig.show()
def workout_balance_2_ta(df):
df_ta = df[(df['division']=='Men')|(df['division']=='Women')]
ta_1 = int(0.01*df_ta.overallrank.max())
df_ta = df_ta[df_ta['overallrank']<=ta_1]
fig = go.Figure()
df_m_1 = df_ta[df_ta['gender']=='M'].groupby(by='w2_full_rounds_completed').count()
df_m_1_list = list(df_m_1.competitorid)
fig.add_trace(go.Funnel(
name = 'Men',
y = ['start', ' 8 min', '12 min', '16 min', '20 min'],
x = [
sum(df_m_1_list),
sum(df_m_1_list),
sum(df_m_1_list)-sum(df_m_1_list[:1]),
sum(df_m_1_list)-sum(df_m_1_list[:2]),
sum(df_m_1_list)-sum(df_m_1_list[:3])
],
textinfo = "value+percent initial",
marker = {"color": "#86CE00"}))
df_f_1 = df_ta[df_ta['gender']=='F'].groupby(by='w2_full_rounds_completed').count()
df_f_1_list = list(df_f_1.competitorid)
fig.add_trace(go.Funnel(
name = 'Women',
y = ['start', ' 8 min', '12 min', '16 min', '20 min'],
x = [
sum(df_f_1_list),
sum(df_f_1_list),
sum(df_f_1_list)-sum(df_f_1_list[:1]),
sum(df_f_1_list)-sum(df_f_1_list[:2]),
sum(df_f_1_list)-sum(df_f_1_list[:3])
],
textinfo = "value+percent initial",
marker = {"color": "#FBE426"}))
fig.show()
def workout_balance_3(df):
fig, axes = plt.subplots(1, 3, figsize=(30, 7))
a=sns.histplot(
ax=axes[0],
data=df[df['scaled_3']==0],
x='w3_reps_total',
hue='gender',
bins=30,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
a.set_title('19.3 | Rx')
a.set_ylabel('')
a.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in a.get_yticks()/1000]
a.set_yticklabels(ylabels)
b=sns.histplot(
ax=axes[1],
data=df[df['scaled_3']==1],
x='w3_reps_total',
hue='gender',
bins=30,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
b.set_title('19.3 | Scaled')
b.set_ylabel('')
b.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in b.get_yticks()/1000]
b.set_yticklabels(ylabels)
df_ta = df[(df['division']=='Men')|(df['division']=='Women')]
ta_1 = int(0.01*df_ta.overallrank.max())
df_ta = df_ta[df_ta['overallrank']<=ta_1]
c=sns.histplot(
ax=axes[2],
data=df_ta,
x='w3_reps_total',
hue='gender',
bins=25,
multiple='stack',
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
c.set_title('19.3 | Top 1%')
c.set_ylabel('')
c.set_xlabel('total reps');
def workout_balance_3_rx(df):
fig = go.Figure()
df_m_rx = df[(df['gender']=='M') & (df['scaled_3']==0) & (df['score_3']!=0)].groupby(by='w3_full_rounds_completed').count()
df_m_rx_list = list(df_m_rx.competitorid)
fig.add_trace(go.Funnel(
name = 'Men',
y = ['start', 'OHL', 'Box SU', 'HSPU', 'HS-Walk'],
x = [
sum(df_m_rx_list),
sum(df_m_rx_list)-sum(df_m_rx_list[:1]),
sum(df_m_rx_list)-sum(df_m_rx_list[:2]),
sum(df_m_rx_list)-sum(df_m_rx_list[:3]),
sum(df_m_rx_list)-sum(df_m_rx_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#86CE00"}))
df_f_rx = df[(df['gender']=='F') & (df['scaled_3']==0) & (df['score_3']!=0)].groupby(by='w3_full_rounds_completed').count()
df_f_rx_list = list(df_f_rx.competitorid)
fig.add_trace(go.Funnel(
name = 'Women',
y = ['start', 'OHL', 'Box SU', 'HSPU', 'HS-Walk'],
x = [
sum(df_f_rx_list),
sum(df_f_rx_list)-sum(df_f_rx_list[:1]),
sum(df_f_rx_list)-sum(df_f_rx_list[:2]),
sum(df_f_rx_list)-sum(df_f_rx_list[:3]),
sum(df_f_rx_list)-sum(df_f_rx_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#FBE426"}))
fig.show()
def workout_balance_3_sc(df):
fig = go.Figure()
df_m_sc = df[(df['gender']=='M') & (df['scaled_3']==1) & (df['score_3']!=0)].groupby(by='w3_full_rounds_completed').count()
df_m_sc_list = list(df_m_sc.competitorid)
fig.add_trace(go.Funnel(
name = 'Men',
y = ['start', 'OHL', 'Box SU', 'HSPU', 'HS-Walk'],
x = [
sum(df_m_sc_list),
sum(df_m_sc_list)-sum(df_m_sc_list[:1]),
sum(df_m_sc_list)-sum(df_m_sc_list[:2]),
sum(df_m_sc_list)-sum(df_m_sc_list[:3]),
sum(df_m_sc_list)-sum(df_m_sc_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#86CE00"}))
    df_f_sc = df[(df['gender']=='F') & (df['scaled_3']==1) & (df['score_3']!=0)].groupby(by='w3_full_rounds_completed').count()
df_f_sc_list = list(df_f_sc.competitorid)
fig.add_trace(go.Funnel(
name = 'Women',
y = ['start', 'OHL', 'Box SU', 'HSPU', 'HS-Walk'],
x = [
sum(df_f_sc_list),
sum(df_f_sc_list)-sum(df_f_sc_list[:1]),
sum(df_f_sc_list)-sum(df_f_sc_list[:2]),
sum(df_f_sc_list)-sum(df_f_sc_list[:3]),
sum(df_f_sc_list)-sum(df_f_sc_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#FBE426"}))
fig.show()
def workout_balance_3_ta(df):
df_ta = df[(df['division']=='Men')|(df['division']=='Women')]
ta_1 = int(0.01*df_ta.overallrank.max())
df_ta = df_ta[df_ta['overallrank']<=ta_1]
fig = go.Figure()
df_m_1 = df_ta[df_ta['gender']=='M'].groupby(by='w3_full_rounds_completed').count()
df_m_1_list = list(df_m_1.competitorid)
fig.add_trace(go.Funnel(
name = 'Men',
        y = ['start', 'OHL', 'Box SU', 'HSPU', 'HS-Walk'],
x = [
sum(df_m_1_list),
sum(df_m_1_list)-sum(df_m_1_list[:1]),
sum(df_m_1_list)-sum(df_m_1_list[:2]),
sum(df_m_1_list)-sum(df_m_1_list[:3]),
sum(df_m_1_list)-sum(df_m_1_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#86CE00"}))
df_f_1 = df_ta[df_ta['gender']=='F'].groupby(by='w3_full_rounds_completed').count()
df_f_1_list = list(df_f_1.competitorid)
fig.add_trace(go.Funnel(
name = 'Women',
        y = ['start', 'OHL', 'Box SU', 'HSPU', 'HS-Walk'],
x = [
sum(df_f_1_list),
sum(df_f_1_list)-sum(df_f_1_list[:1]),
sum(df_f_1_list)-sum(df_f_1_list[:2]),
sum(df_f_1_list)-sum(df_f_1_list[:3]),
sum(df_f_1_list)-sum(df_f_1_list[:4])
],
textinfo = "value+percent initial",
marker = {"color": "#FBE426"}))
fig.show()
def workout_balance_4(df):
fig, axes = plt.subplots(1, 3, figsize=(30, 7))
a = sns.histplot(
ax=axes[0],
data=df[df['scaled_4']==0],
x='w4_reps_total',
hue='gender',
bins=33,
multiple='stack',
palette={"F": "gold", "M": "limegreen"}
)
a.set_title('19.4 | Rx')
a.set_ylabel('')
a.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in a.get_yticks()/1000]
a.set_yticklabels(ylabels)
b = sns.histplot(
ax=axes[1],
data=df[df['scaled_4']==1],
x='w4_reps_total',
hue='gender',
bins=33,
multiple='stack',
palette={"F": "gold", "M": "limegreen"}
)
b.set_title('19.4 | Scaled')
b.set_ylabel('')
b.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in b.get_yticks()/1000]
b.set_yticklabels(ylabels)
df_ta = df[(df['division']=='Men')|(df['division']=='Women')]
ta_1 = int(0.01*df_ta.overallrank.max())
df_ta = df_ta[df_ta['overallrank']<=ta_1]
c = sns.histplot(
ax=axes[2],
data=df_ta,
x='w4_reps_total',
hue='gender',
bins=15,
multiple='stack',
palette={"F": "gold", "M": "limegreen"}
)
c.set_title('19.4 | Top 1%')
c.set_ylabel('')
c.set_xlabel('total reps')
ylabels = ['{:,.1f}'.format(y) + 'k' for y in c.get_yticks()/1000]
c.set_yticklabels(ylabels);
def workout_balance_5(df):
fig, axes = plt.subplots(1, 3, figsize=(30, 7))
a = sns.histplot(
ax=axes[0],
data=df[df['scaled_5']==0],
x='w5_reps_total',
hue='gender',
bins=15,
multiple='stack',
palette={"F": "gold", "M": "limegreen"}
)
a.set_title('19.5 | Rx')
a.set_ylabel('')
a.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in a.get_yticks()/1000]
a.set_yticklabels(ylabels)
b = sns.histplot(
ax=axes[1],
data=df[df['scaled_5']==1],
x='w5_reps_total',
hue='gender',
bins=15,
multiple='stack',
palette={"F": "gold", "M": "limegreen"}
)
b.set_title('19.5 | Scaled')
b.set_ylabel('')
b.set_xlabel('total reps')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in b.get_yticks()/1000]
b.set_yticklabels(ylabels)
df_ta = df[(df['division']=='Men')|(df['division']=='Women')]
ta_1 = int(0.01*df_ta.overallrank.max())
df_ta = df_ta[df_ta['overallrank']<=ta_1]
c = sns.histplot(
ax=axes[2],
data=df_ta,
x='w5_reps_total',
hue='gender',
bins=3,
multiple='stack',
palette={"F": "gold", "M": "limegreen"}
)
c.set_title('19.5 | Top 1%')
c.set_ylabel('')
c.set_xlabel('total reps')
ylabels = ['{:,.1f}'.format(y) + 'k' for y in c.get_yticks()/1000]
c.set_yticklabels(ylabels);
def ranking_1_m_rx(df):
df_m = df[(df['division']=='Men')&(df['scaled_1']==0)]
max_m = (df_m['rank_1'].max())/100
a_m=list(df_m.groupby(by='w1_reps_total')['rank_1'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w1_reps_total')['w1_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
g=sns.relplot(x=c_m, y=a_m, kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(xlabel='total reps', ylabel='rank [%]', title= '19.1 | Male | Rx');
def ranking_1_m_rx_ta(df):
df_m = df[(df['division']=='Men')&(df['scaled_1']==0)]
max_m = (df_m['rank_1'].max())/100
a_m=list(df_m.groupby(by='w1_reps_total')['rank_1'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w1_reps_total')['w1_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
last=58
g=sns.relplot(x=c_m[-last:], y=a_m[-last:], kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(
xlabel='total reps',
ylabel='rank [%]',
title= '19.1 | Male | Rx | Top 1%')
g.fig.set_figheight(4)
g.fig.set_figwidth(7)
def ranking_2_m_rx(df):
df_m = df[(df['division']=='Men')&(df['scaled_2']==0)]
max_m = (df_m['rank_2'].max())/100
a_m=list(df_m.groupby(by='w2_reps_total')['rank_2'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w2_reps_total')['w2_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
g=sns.relplot(x=c_m, y=a_m, kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(xlabel='total reps', ylabel='rank [%]', title= '19.2 | Male | Rx');
def ranking_2_m_rx_ta(df):
df_m = df[(df['division']=='Men')&(df['scaled_2']==0)]
max_m = (df_m['rank_2'].max())/100
a_m=list(df_m.groupby(by='w2_reps_total')['rank_2'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w2_reps_total')['w2_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
last=30
g=sns.relplot(x=c_m[-last:], y=a_m[-last:], kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(
xlabel='total reps',
ylabel='rank [%]',
title= '19.2 | Male | Rx | Top 1%')
g.fig.set_figheight(4)
g.fig.set_figwidth(7)
def ranking_3_m_rx(df):
df_m = df[(df['division']=='Men')&(df['scaled_3']==0)]
max_m = (df_m['rank_3'].max())/100
a_m=list(df_m.groupby(by='w3_reps_total')['rank_3'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w3_reps_total')['w3_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
g=sns.relplot(x=c_m, y=a_m, kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(xlabel='total reps', ylabel='rank [%]', title= '19.3 | Male | Rx');
def ranking_3_m_rx_ta(df):
df_m = df[(df['division']=='Men')&(df['scaled_3']==0)]
max_m = (df_m['rank_3'].max())/100
a_m=list(df_m.groupby(by='w3_reps_total')['rank_3'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w3_reps_total')['w3_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
last=27
g=sns.relplot(x=c_m[-last:], y=a_m[-last:], kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(
xlabel='total reps',
ylabel='rank [%]',
title= '19.3 | Male | Rx | Top 1%')
g.fig.set_figheight(4)
g.fig.set_figwidth(7)
def ranking_4_m_rx(df):
df_m = df[(df['division']=='Men')&(df['scaled_4']==0)]
max_m = (df_m['rank_4'].max())/100
a_m=list(df_m.groupby(by='w4_reps_total')['rank_4'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w4_reps_total')['w4_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
g=sns.relplot(x=c_m, y=a_m, kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(xlabel='total reps', ylabel='rank [%]', title= '19.4 | Male | Rx');
def ranking_4_m_rx_ta(df):
df_m = df[(df['division']=='Men')&(df['scaled_4']==0)]
max_m = (df_m['rank_4'].max())/100
a_m=list(df_m.groupby(by='w4_reps_total')['rank_4'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w4_reps_total')['w4_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
last=15
g=sns.relplot(x=c_m[-last:], y=a_m[-last:], kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(
xlabel='total reps',
ylabel='rank [%]',
title= '19.4 | Male | Rx | Top 1%')
g.fig.set_figheight(4)
g.fig.set_figwidth(7)
def ranking_5_m_rx(df):
df_m = df[(df['division']=='Men')&(df['scaled_5']==0)]
max_m = (df_m['rank_5'].max())/100
a_m=list(df_m.groupby(by='w5_reps_total')['rank_5'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w5_reps_total')['w5_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
g=sns.relplot(x=c_m, y=a_m, kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(xlabel='total reps', ylabel='rank [%]', title= '19.5 | Male | Rx');
def ranking_5_m_rx_ta(df):
df_m = df[(df['division']=='Men')&(df['scaled_5']==0)]
max_m = (df_m['rank_5'].max())/100
a_m=list(df_m.groupby(by='w5_reps_total')['rank_5'].mean())
a_m=np.divide(a_m,max_m)
b_m=list(df_m.groupby(by='w5_reps_total')['w5_reps_total'])
c_m=[]
for i in b_m:
c_m.append(i[0])
last=20
g=sns.relplot(x=c_m[-last:], y=a_m[-last:], kind="line",linewidth=4)
g.fig.set_size_inches(15,5)
g.set(
xlabel='total reps',
ylabel='rank [%]',
title= '19.5 | Male | Rx | Top 1%')
g.fig.set_figheight(4)
g.fig.set_figwidth(7)
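# The ranking_<n>_m_rx(_ta) helpers above differ only in the workout number and
# in how much of the tail is plotted; a single parameterized sketch (an assumed
# helper, not part of the original module) could cover all of them:
def ranking_m_rx_sketch(df, workout, last=None):
    # Rx men for the given workout
    df_m = df[(df['division'] == 'Men') & (df[f'scaled_{workout}'] == 0)]
    max_m = df_m[f'rank_{workout}'].max() / 100
    # mean rank per total-rep count, normalized to a percentage of the field
    mean_rank = df_m.groupby(by=f'w{workout}_reps_total')[f'rank_{workout}'].mean() / max_m
    x = list(mean_rank.index)
    y = list(mean_rank.values)
    if last is not None:  # e.g. last=58 mimics the "Top 1%" style plots above
        x, y = x[-last:], y[-last:]
    g = sns.relplot(x=x, y=y, kind="line", linewidth=4)
    g.fig.set_size_inches(15, 5)
    g.set(xlabel='total reps', ylabel='rank [%]',
          title=f'19.{workout} | Male | Rx')
    return g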
def regional_participation(df):
plt.figure(figsize=(18,7))
g = sns.countplot(
data=df,
x='region',
hue='gender',
order=df.region.value_counts().iloc[:7].index,
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
g.set_ylabel('')
g.set_xlabel('')
g.set_title('Regional Participation')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in g.get_yticks()/1000]
g.set_yticklabels(ylabels)
plt.show()
def regional_ranking(df):
plt.figure(figsize=(20,9))
g = sns.barplot(
data=df[(df['division']=='Men')|(df['division']=='Women')],
x='region',
y='overallrank',
hue='gender',
order=df.region.value_counts().iloc[:7].index,
edgecolor='black',
palette={"F": "gold", "M": "limegreen"}
)
g.set_title('Average Ranking')
g.set_xlabel('')
g.set_ylabel('Overall Ranking')
ylabels = ['{:,.0f}'.format(y) + 'k' for y in g.get_yticks()/1000]
g.set_yticklabels(ylabels)
plt.show()
countries = ['United States', 'Bahamas', 'New Zealand', 'Canada',
'Russian Federation', 'Kenya', 'Australia', 'Costa Rica',
'Denmark', 'United Kingdom', 'Sweden', 'Netherlands', 'France',
'Ireland', 'Switzerland', 'South Africa', 'Germany', 'Argentina',
'Italy', 'Norway', 'Iceland', 'Zimbabwe', 'Peru', 'Portugal',
'Korea, Republic of', 'Philippines', 'Colombia', 'Mexico',
'Singapore', 'Chile', 'Israel', 'Brazil', 'Ukraine', 'Spain',
'Ecuador', 'United Arab Emirates', 'Slovenia', 'Turkey', 'China',
'Jamaica', 'Bahrain', 'Czech Republic', 'Malaysia', 'Egypt',
'Hungary', 'Indonesia', 'Finland', 'Angola', 'Tonga',
'Trinidad and Tobago', 'Belgium', 'Brunei Darussalam',
'Afghanistan', 'Austria', 'Croatia', 'Latvia', 'India', 'Malta',
'Bulgaria', 'Thailand', 'Morocco', 'Greece', 'Iran', 'Japan',
'Dominican Republic', 'Barbados', 'Honduras', 'Guatemala',
'Panama', 'Samoa', 'Venezuela', 'Iraq', 'Slovakia', 'Viet Nam',
'Romania', 'Lebanon', 'Poland', 'Belarus', 'Sri Lanka', 'Kuwait',
'Palestinian Territory', 'Namibia', 'Estonia', 'Tunisia',
'Bolivia', 'Saudi Arabia', 'Luxembourg', 'Nigeria',
'Papua New Guinea', 'Zambia', 'El Salvador', 'Serbia', 'Oman',
'Macedonia', 'Guyana', 'Tanzania', 'Fiji', 'Andorra', 'Cyprus',
'Nicaragua', 'Belize', 'Jordan', 'Qatar', 'Uruguay', 'Lithuania',
'Montenegro', 'Uganda', 'Paraguay', 'Madagascar', 'Kazakhstan',
'Cambodia', 'San Marino', 'Mauritius', 'Algeria', 'Suriname',
'Saint Vincent/Grenadines', 'Bosnia and Herzegovina',
'Congo, The Republic of', 'Libya', 'Pakistan', 'Mozambique',
'Liechtenstein', 'Moldova', 'Syrian Arab Republic', 'Kyrgyzstan',
'Saint Lucia', 'Uzbekistan', 'Azerbaijan', 'Senegal',
'Congo, The Democratic Republic of the', 'Mongolia', 'Kosovo',
'Botswana', 'Djibouti', 'Armenia', 'Georgia', 'Somalia', 'Vanuatu',
"Côte d'Ivoire", 'Ghana', 'Antigua and Barbuda', 'Tajikistan',
'Nepal', 'Yemen', 'Rwanda', 'Tuvalu', 'Myanmar', 'Guinea-Bissau',
'Maldives', 'Eritrea', 'Niger', 'Albania', 'Cameroon', 'Mali',
'Malawi', 'Bangladesh']
def top_countries(df):
df_x = df[(df['division']=='Men') | (df['division']=='Women')]
df_1 = df_x[df_x['countryoforiginname']=='United States'].sort_values(by='overallrank')[:100]
df_2 = df_x[df_x['countryoforiginname']=='Canada'].sort_values(by='overallrank')[:100]
df_3 = df_x[df_x['countryoforiginname']=='Australia'].sort_values(by='overallrank')[:100]
df_4 = df_x[df_x['countryoforiginname']=='United Kingdom'].sort_values(by='overallrank')[:100]
df_5 = df_x[df_x['countryoforiginname']=='France'].sort_values(by='overallrank')[:100]
df_6 = df_x[df_x['countryoforiginname']=='New Zealand'].sort_values(by='overallrank')[:100]
df_7 = df_x[df_x['countryoforiginname']=='Brazil'].sort_values(by='overallrank')[:100]
df_8 = df_x[df_x['countryoforiginname']=='Russian Federation'].sort_values(by='overallrank')[:100]
df_9 = df_x[df_x['countryoforiginname']=='Sweden'].sort_values(by='overallrank')[:100]
df_10 = df_x[df_x['countryoforiginname']=='Italy'].sort_values(by='overallrank')[:100]
df_tot = pd.concat([df_1,df_2,df_3,df_4,df_5,df_6,df_7,df_8,df_9,df_10])
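# Hedged completion: the source is cut off at this point; returning the pooled
# frame (assumed ending) at least keeps the helper usable for downstream plots.
return df_tot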
import logging
import yaml
import os
import docker
import re
import sys
from tempfile import NamedTemporaryFile
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
from docker.errors import NotFound, APIError
from io import StringIO
# from pynomer.client import NomerClient
# from ..core import URIMapper, URIManager, TaxId
from ..util.taxo_helper import *
pd.options.mode.chained_assignment = None
"""
https://github.com/globalbioticinteractions/globalbioticinteractions/wiki/Taxonomy-Matching
"""
class NoValidColumnException(Exception):
pass
class ConfigurationError(Exception):
pass
def create_mapping(df):
"""
Return a dict that keeps track of duplicated items in a DataFrame
"""
return (
df.reset_index()
.groupby(df.columns.tolist(), dropna=False)["index"]
.agg(["first", tuple])
.set_index("first")["tuple"]
.to_dict()
)
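# Illustrative behaviour (assumed example, not from the project docs):
#   create_mapping(pd.DataFrame({"name": ["a", "b", "a"]}))
#   -> {0: (0, 2), 1: (1,)}
# i.e. the first occurrence of every unique row maps to the positions of all of
# its duplicates, so validation can run once per unique row and the result can
# be broadcast back to every duplicate afterwards.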
class TaxonomicEntityValidator:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
self.taxo_to_matcher = {
"GBIF": "gbif",
"NCBI": "ncbi",
"IF": "indexfungorum",
"SILVA": "ncbi",
}
self.default_name_matcher = "globalnames"
self.nomer = NomerHelper()
def validate(self, df):
"""For a subset of columns (e.g. consumers and resources),
validate taxonomic ids and/or names against a source taxonomy.
Returns the input DataFrame with new columns containing the valid
ids and names for each query column.
"""
for column_config in self.config.columns:
# Set default values
assert column_config.uri_column != None
column_config.id_column = (
column_config.id_column if "id_column" in column_config else None
)
column_config.name_column = (
column_config.name_column if "name_column" in column_config else None
)
column_config.source_taxonomy = (
column_config.source_taxonomy
if "source_taxonomy" in column_config
else None
)
if not (column_config.id_column or column_config.name_column):
raise NoValidColumnException(
"You should specify at least one valid column containing the taxon names or ids."
)
# Map taxa to target taxonomy
self.logger.info(
f"Validate {df.shape[0]} taxa from columns ({column_config.id_column},{column_config.name_column})"
)
valid_df = self.validate_columns(
df,
id_column=column_config.id_column,
name_column=column_config.name_column,
source_taxonomy=column_config.source_taxonomy,
)
df[column_config.uri_column] = valid_df["iri"]
df[column_config.valid_name_column] = valid_df["valid_name"]
df[column_config.valid_id_column] = valid_df["valid_id"]
return df
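# Illustrative shape of self.config.columns consumed above (inferred from the
# attribute accesses in validate(), not the project's documented schema):
#   columns:
#     - uri_column: consumer_uri            # required
#       valid_id_column: consumer_valid_id
#       valid_name_column: consumer_valid_name
#       id_column: consumer_id              # optional; at least one of
#       name_column: consumer_name          # id_column / name_column is needed
#       source_taxonomy: NCBI               # required when id_column is given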
def validate_columns(
self, df, id_column=None, name_column=None, source_taxonomy=None
):
"""
Taxonomic entity validation consists in checking that the pair (taxid, name)
is valid in a given taxonomy (both taxid and name are optional, but at least
one of them must exist). This function adds a column "valid_id" and a column
"valid_name" to the input DataFrame. If both values are NaN, the corresponding
entity is considered invalid.
"""
def add_prefix(col, src_taxo):
"""
Add the source taxonomy name as a prefix to all taxids in a column
"""
def return_prefixed(id, src_taxo):
if (
pd.notnull(id) and len(str(id).split(":")) == 2
): # .startswith(src_taxo + ":"):
return (
id
if not pd.isna(
pd.to_numeric(str(id).split(":")[-1], errors="coerce")
)
else np.nan
)
elif pd.notnull(id) and pd.isna(pd.to_numeric(id, errors="coerce")):
return np.nan
elif pd.notnull(id):
return f"{src_taxo}:{id}"
else:
return None
return col.map(lambda id: return_prefixed(id, src_taxo))
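# Illustrative behaviour of add_prefix (assuming source_taxonomy == "NCBI"):
#   "1234"      -> "NCBI:1234"
#   "NCBI:1234" -> "NCBI:1234"   (already prefixed, numeric suffix kept)
#   "NCBI:abc"  -> NaN           (prefixed but non-numeric id is rejected)
#   "abc"       -> NaN           (bare non-numeric id is rejected)
#   NaN / None  -> None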
assert id_column or name_column
subset = [col for col in [id_column, name_column] if col]
sub_df = df[subset].astype(pd.StringDtype(), errors="ignore")
mapping = create_mapping(
sub_df
) # Mapping from items in drop_df to all duplicates in sub_df
drop_df = sub_df.drop_duplicates(subset=subset).replace("", np.nan)
id_df = None
name_df = None
if id_column:
assert source_taxonomy
if source_taxonomy in self.taxo_to_matcher:
drop_df[id_column] = add_prefix(drop_df[id_column], source_taxonomy)
id_df = drop_df.dropna(subset=[id_column])
if name_column:
drop_df["canonical_name"] = drop_df[name_column]
names = drop_df["canonical_name"].dropna().to_list()
norm_names = self.normalize_names(names)
drop_df.replace({"canonical_name": norm_names}, inplace=True)
if id_df is not None:
name_df = drop_df.loc[~drop_df.index.isin(id_df.index)]
else:
name_df = drop_df.dropna(subset=["canonical_name"])
sub_df["valid_id"] = None
sub_df["valid_name"] = None
sub_df["iri"] = None
if id_df is not None and not id_df.empty:
valid_ids = self.validate_taxids(
id_df, id_column, name_column, source_taxonomy
)
valid_ids = valid_ids.groupby(
["queryId"], dropna=False
) # Get all matches for each id
for index, row in drop_df.iterrows():
id = row[id_column]
if pd.notnull(id):
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
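# Note: the jump from 11:00 to 13:00 leaves an uneven step at the final sample,
# which is what sets the UNEVEN FREQUENCY flag on the last point in the
# immediate-validation tests below.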
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
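# Note: appending a lone 2019-01-02T09:00 timestamp after the hourly
# 2019-01-01 range creates the uneven step that sets the UNEVEN FREQUENCY flag
# on the final sample of the daily-validation tests below.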
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
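# Each expected entry is a 0/1 indicator series multiplied by the matching
# bitmask, so a 1 marks the samples where that particular flag should be set;
# the loop below ORs in LATEST_VERSION_FLAG, which the validators add to every
# sample.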
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
flags = tasks.validate_dni(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 0, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dni(mocker, make_observation,
default_index):
obs = make_observation('dni')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dhi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dhi_limits_QCRad']]
obs = make_observation('dhi')
data = pd.Series([10, 1000, -100, 200, 200], index=default_index)
flags = tasks.validate_dhi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dhi(mocker, make_observation,
default_index):
obs = make_observation('dhi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_poa_global(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_poa_clearsky']]
obs = make_observation('poa_global')
data = pd.Series([10, 1000, -400, 300, 300], index=default_index)
flags = tasks.validate_poa_global(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_poa_global(mocker, make_observation,
default_index):
obs = make_observation('poa_global')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_air_temp(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_temperature_limits']]
obs = make_observation('air_temperature')
data = pd.Series([10, 1000, -400, 30, 20], index=default_index)
flags = tasks.validate_air_temperature(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_air_temperature(
mocker, make_observation, default_index):
obs = make_observation('air_temperature')
data = pd.DataFrame(
[(0, 0), (200, 0), (20, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_wind_speed(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_wind_limits']]
obs = make_observation('wind_speed')
data = pd.Series([10, 1000, -400, 3, 20], index=default_index)
flags = tasks.validate_wind_speed(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_wind_speed(
mocker, make_observation, default_index):
obs = make_observation('wind_speed')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_relative_humidity(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_rh_limits']]
obs = make_observation('relative_humidity')
data = pd.Series([10, 101, -400, 60, 20], index=default_index)
flags = tasks.validate_relative_humidity(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_relative_humidity(
mocker, make_observation, default_index):
obs = make_observation('relative_humidity')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (40, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_ac_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ac_power_limits']]
obs = make_observation('ac_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power(mocker, make_observation,
default_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dc_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dc_power_limits']]
obs = make_observation('dc_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power(mocker, make_observation,
default_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_daily_ghi(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('ghi')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[10, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ghi_daily(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
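# Daily validation stamps every sample with DAILY_VALIDATION_FLAG on top of
# the per-check masks, which is why BASE_FLAG here differs from the
# immediate-validation tests above.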
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_fetch_and_validate_observation_ghi_zeros(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)] * 13,
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
base = (
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
base,
base,
base,
base,
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_dc_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('dc_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_ac_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation',
'detect_clipping']]
obs = make_observation('ac_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(10, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity', 'net_load',
])
def test_fetch_and_validate_observation_other(var, mocker, make_observation,
daily_index):
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
validated = pd.Series(2, index=daily_index)
validate_mock = mocker.MagicMock(return_value=validated)
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: validate_mock})
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
assert post_mock.called
assert validate_mock.called
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_daily_validation_other(
mocker, make_observation, daily_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
mocks = [mock,
mocker.spy(tasks, '_validate_stale_interpolated')]
obs = make_observation(var)
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
10, 1900, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 0}, index=daily_index)
out = tasks.apply_daily_validation(obs, data)
assert (out['quality_flag'] | DAILY_VALIDATION_FLAG).all()
for mock in mocks:
assert mock.called
@pytest.mark.parametrize('var', ['net_load'])
def test_apply_daily_validation_defaults(
mocker, make_observation, daily_index, var):
mocks = [mocker.spy(tasks, 'validate_defaults'),
mocker.spy(tasks, '_validate_stale_interpolated')]
obs = make_observation(var)
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
10, 1900, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 0}, index=daily_index)
out = tasks.apply_daily_validation(obs, data)
assert (out['quality_flag'] | DAILY_VALIDATION_FLAG).all()
for mock in mocks:
assert mock.called
def test_apply_daily_validation(mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 94},
index=daily_index)
out = tasks.apply_daily_validation(obs, data)
qf = (pd.Series(LATEST_VERSION_FLAG, index=data.index),
pd.Series(DAILY_VALIDATION_FLAG, index=data.index),
pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
exp = data.copy()
exp['quality_flag'] = sum(qf)
assert_frame_equal(exp, out)
def test_apply_daily_validation_not_enough(mocker, make_observation):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)],
index=pd.date_range(start='2019-01-01T0000Z',
end='2019-01-01T0100Z',
tz='UTC',
freq='1h'),
columns=['value', 'quality_flag'])
with pytest.raises(IndexError):
tasks.apply_daily_validation(obs, data)
def test_fetch_and_validate_all_observations(mocker, make_observation,
daily_index):
obs = [make_observation('dhi'), make_observation('dni')]
obs += [make_observation('ghi').replace(provider='Organization 2')]
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.list_observations',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs[0].provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
validated = pd.Series(2, index=daily_index)
validate_mock = mocker.MagicMock(return_value=validated)
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{'dhi': validate_mock, 'dni': validate_mock})
tasks.fetch_and_validate_all_observations(
'', data.index[0], data.index[-1], only_missing=False)
assert post_mock.called
assert validate_mock.call_count == 2
def test_fetch_and_validate_all_observations_only_missing(
mocker, make_observation, daily_index):
obs = [make_observation('dhi'), make_observation('dni')]
obs += [make_observation('ghi').replace(provider='Organization 2')]
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.list_observations',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs[0].provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values_not_flagged', # NOQA
return_value=np.array(['2019-01-01', '2019-01-02'],
dtype='datetime64[D]'))
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_all_observations(
'', data.index[0], data.index[-1], only_missing=True)
assert post_mock.called
assert (post_mock.call_args_list[0][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[1][0][1].index.date ==
dt.date(2019, 1, 2)).all()
assert (post_mock.call_args_list[2][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[3][0][1].index.date ==
dt.date(2019, 1, 2)).all()
def test_fetch_and_validate_observation_only_missing(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs.provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values_not_flagged', # NOQA
return_value=np.array(['2019-01-01', '2019-01-02'],
dtype='datetime64[D]'))
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'token', 'obsid', data.index[0], data.index[-1], only_missing=True)
assert post_mock.called
assert (post_mock.call_args_list[0][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[1][0][1].index.date ==
dt.date(2019, 1, 2)).all()
def test__group_continuous_week_post(mocker, make_observation):
split_dfs = [
pd.DataFrame([(0, LATEST_VERSION_FLAG)],
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-03T00:00',
end='2020-05-03T23:59',
tz='UTC',
freq='1h')),
# new week split
pd.DataFrame([(0, LATEST_VERSION_FLAG)],
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-04T00:00',
end='2020-05-04T11:59',
tz='UTC',
freq='1h')),
# missing 12
pd.DataFrame(
[(0, LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'])] + # NOQA
[(1, LATEST_VERSION_FLAG)] * 7,
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-04T13:00',
end='2020-05-04T20:00',
tz='UTC',
freq='1h')),
# missing a week+
pd.DataFrame(
[(9, LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'])] + # NOQA
[(3, LATEST_VERSION_FLAG)] * 7,
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-13T09:00',
end='2020-05-13T16:59',
tz='UTC',
freq='1h')),
]
ov = pd.concat(split_dfs, axis=0)
obs = make_observation('ghi')
session = mocker.MagicMock()
tasks._group_continuous_week_post(session, obs, ov)
call_list = session.post_observation_values.call_args_list
assert len(call_list) == 4
for i, cal in enumerate(call_list):
assert_frame_equal(split_dfs[i], cal[0][1])
@pytest.mark.parametrize('vals,func', [
(pd.DataFrame({'value': 0, 'quality_flag': 4}, index=pd.DatetimeIndex(
[pd.Timestamp.utcnow()], name='timestamp')),
'apply_immediate_validation'),
(pd.DataFrame({'value': [0.0] * 5 + [None] * 10, 'quality_flag': 4},
index=pd.date_range('now', name='timestamp', freq='2h',
periods=15)),
'apply_immediate_validation'),
(pd.DataFrame({'value': [0.0] * 15 + [None] * 11, 'quality_flag': 4},
index=pd.date_range('now', name='timestamp', freq='1h',
periods=26)),
'apply_daily_validation'),
])
def test_apply_validation(make_observation, mocker, vals, func):
obs = make_observation('ac_power')
fmock = mocker.patch.object(tasks, func, autospec=True)
tasks.apply_validation(obs, vals)
assert fmock.called
def test_apply_validation_empty(make_observation, mocker):
obs = make_observation('dhi')
daily = mocker.patch.object(tasks, 'apply_daily_validation')
immediate = mocker.patch.object(tasks, 'apply_immediate_validation')
data = pd.DataFrame({'value': [], 'quality_flag': []},
index=pd.DatetimeIndex([], name='timestamp'))
out = tasks.apply_validation(obs, data)
assert_frame_equal(out, data)
assert not daily.called
assert not immediate.called
def test_apply_validation_bad_df(make_observation, mocker):
obs = make_observation('dhi')
data = pd.DataFrame()
with pytest.raises(TypeError):
tasks.apply_validation(obs, data)
with pytest.raises(TypeError):
tasks.apply_validation(obs, pd.Series(
index=pd.DatetimeIndex([]),
dtype=float))
def test_apply_validation_agg(aggregate, mocker):
data = pd.DataFrame({'value': [1], 'quality_flag': [0]},
index=pd.DatetimeIndex(
['2020-01-01T00:00Z'], name='timestamp'))
out = tasks.apply_validation(aggregate, data)
assert_frame_equal(data, out)
def test_find_unvalidated_time_ranges(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13', '2019-04-14', '2019-04-15', '2019-04-16', '2019-04-18',
'2019-05-22', '2019-05-23'], dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'UTC'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00Z'), pd.Timestamp('2019-04-17T00:00Z')),
(pd.Timestamp('2019-04-18T00:00Z'), pd.Timestamp('2019-04-19T00:00Z')),
(pd.Timestamp('2019-05-22T00:00Z'), pd.Timestamp('2019-05-24T00:00Z')),
]
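# The expected output above groups consecutive unflagged days into half-open
# date ranges. A compact, library-independent sketch of that grouping idea
# (the dates here are arbitrary examples):
def _example_group_consecutive_days():
    days = np.array(['2019-04-13', '2019-04-14', '2019-04-16'],
                    dtype='datetime64[D]')
    breaks = np.where(np.diff(days) > np.timedelta64(1, 'D'))[0] + 1
    groups = np.split(days, breaks)
    # -> [(2019-04-13, 2019-04-15), (2019-04-16, 2019-04-17)]
    return [(g[0], g[-1] + np.timedelta64(1, 'D')) for g in groups]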
def test_find_unvalidated_time_ranges_all(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13', '2019-04-14', '2019-04-15', '2019-04-16'],
dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'Etc/GMT+7'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00-07:00'),
pd.Timestamp('2019-04-17T00:00-07:00')),
]
def test_find_unvalidated_time_ranges_single(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13'], dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'Etc/GMT+5'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00-05:00'),
pd.Timestamp('2019-04-14T00:00-05:00')),
]
def test_find_unvalidated_time_ranges_disjoint(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13', '2019-05-22'], dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'Etc/GMT+5'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00-05:00'),
pd.Timestamp('2019-04-14T00:00-05:00')),
( | pd.Timestamp('2019-05-22T00:00-05:00') | pandas.Timestamp |
import pandas as pd
from tqdm import tqdm
tqdm.pandas()
import os
import glob
import json
import sys
import gzip
from helpers import create_folder_path, write_csv_file, write_list_to_txt, save_pk_file
def read_jsonl_file(file_name, columns):
"""
    Read a jsonl file and create a dataframe of the required data
    file_name: the name of the jsonl file to read
    columns: a list of columns that need to be extracted;
    if columns is None, all data from the jsonl file will be converted to a dataframe
    the dataframe is always sorted by the "id" column in ascending order
    Return a dataframe of the data required to be extracted
"""
with open(file_name,'r', encoding = 'utf-8') as jfile:
records = [json.loads(line) for line in jfile]
df = pd.DataFrame.from_records(records)
sorted_df = df.sort_values(by="id", ascending=True)
    if columns is None:
return sorted_df
else:
cleaned_df = sorted_df[columns]
return cleaned_df
def prepare_json_data(inst_file, truth_file, inst_columns = None , truth_columns = None):
"""
Read the files from the corpus including an instance file and a truth file.
inst_file: the path to the instance file
truth_file: the path to the truth file
inst_columns: a list of columns required to extracted from the instance file, default value is None meaning all data are needed
truth_column: a list of columns required to extracted from the truth file, default value is None meaning all data are needed
Return a dataframe that is the combination of data from the instance file and the truth file
"""
inst_df = read_jsonl_file(inst_file, inst_columns)
truth_df = read_jsonl_file(truth_file, truth_columns)
merged_df = pd.merge(inst_df, truth_df, on = 'id')
return merged_df
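# Usage sketch for the two readers above. The paths and column names are
# hypothetical (they assume a Webis Clickbait-17 style folder holding
# instances.jsonl and truth.jsonl keyed by "id"); adjust them to the actual
# corpus layout.
def _example_prepare_json_data():
    merged = prepare_json_data(
        "data/train/instances.jsonl",
        "data/train/truth.jsonl",
        inst_columns=["id", "targetTitle", "targetParagraphs"],
        truth_columns=["id", "truthClass"])
    print(merged.head())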
def split_json_data(df, folder, column = "truthScale"):
"""
    Split data into subsets according to the label values
    df: the original dataframe
    column: the name of the column that contains the labels
    folder: the path to the folder that will contain the new data files
    Writes a headline file and a textbody file for each label value
"""
value_set = set(df[column])
for value in value_set:
splited_data = df[df[column]== value]
headline = list(splited_data["targetTitle"])
textbody = list(splited_data["targetParagraphs"])
headline_file_path = f'{folder}/headline_{value}'
        textbody_file_path = f'{folder}/textbody_{value}'
        write_list_to_txt(headline,headline_file_path)
        write_list_to_txt(textbody,textbody_file_path)
def read_jsonl_folder(json_folder):
"""
    Read the instances.jsonl and truth.jsonl files in the folder
    json_folder: the path to the folder that contains the two files
    the merged, cleaned dataframe is written to a csv file and a pickle file in the same folder
    Return the name of the pickle file
"""
inst_columns = ['id',"targetTitle","targetParagraphs"]#, 'postMedia','postText']
truth_columns = ["id","truthClass"]#, "truthMode","truthJudgments"]
path_inst_file = json_folder+"/instances.jsonl"
path_truth_file = json_folder+"/truth.jsonl"
merged_df = prepare_json_data(path_inst_file, path_truth_file, inst_columns, truth_columns)
merged_df["targetTitle"] = merged_df["targetTitle"].progress_map(lambda x: str(x).strip("[").strip(']').strip("\'").strip('\"'))
#merged_df['postText'] = merged_df['postText'].progress_map(lambda x: ' '.join(map(str, x)))
#merged_df['postMedia'] = merged_df['postMedia'].progress_map(lambda x: 0 if x == "[]" else 1)
merged_df['targetParagraphs'] = merged_df['targetParagraphs'].progress_map(lambda x: ' '.join(map(str, x)))
#merged_df["truthScale"] = merged_df["truthMode"].progress_map(lambda x: "non" if x == 0.0 else ("slightly" if 0.3<x<0.6 else ("considerable" if 0.6<x<1 else "heavy")))
merged_df["truthClass"] = merged_df["truthClass"].progress_map(lambda x: "CB" if x == "clickbait" else "Non")
drop_df = merged_df[~merged_df.targetTitle.str.contains("Sections Shows Live Yahoo!")]
final_df = drop_df[~drop_df.targetTitle.str.contains("Top stories Top stories")]
write_csv_file(final_df, json_folder)
pk_file = save_pk_file(final_df, json_folder)
#split_json_data(final_df, save_to)
print(final_df[:3])
return pk_file
def gz_to_txt(gz_file, txt_file):
"""
Convert gz file to txt file and convert content format from byte to utf8
    gz_file: the path to the gz file that needs to be converted
    txt_file: the path to the txt file that will be written
    Print a statement that the file was created
"""
with gzip.open(gz_file, 'rb') as outfile:
file_content = outfile.read()
with open (txt_file,"w", encoding="utf8") as infile:
infile.write(file_content.decode("utf-8"))
print( "File {} created".format(txt_file))
def read_txt(file_name):
"""
Read txt file and return a dataframe containing the data
    file_name: the name of the txt file
"""
with open (file_name, "r", encoding = "utf8") as infile:
content = infile.readlines()
df = pd.DataFrame()
lines = []
for line in content:
if line != "\n":
new_line = line.strip("\n")
lines.append(new_line)
df["targetTitle"] = lines
df["truthClass"] = "Non" if "non" in file_name else "CB"
return df
def read_gz_folder(gz_folder):
"""
    Read .gz files and return a dataframe containing the data in the files
gz_folder: path to folder containing .gz files
"""
df_list = []
for read_file in tqdm(glob.glob(os.path.join(gz_folder, '*.gz'))):
file_name = read_file.replace(".gz", ".txt")
gz_to_txt(read_file, file_name)
df = read_txt(file_name)
df_list.append(df)
merged_df = | pd.concat(df_list) | pandas.concat |
from IPython.core.error import UsageError
from mock import MagicMock
import numpy as np
from nose.tools import assert_equals, assert_is
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe
from sparkmagic.utils.constants import SESSION_KIND_PYSPARK
def test_parse_argstring_or_throw():
parse_argstring = MagicMock(side_effect=UsageError('OOGABOOGABOOGA'))
try:
parse_argstring_or_throw(MagicMock(), MagicMock(), parse_argstring=parse_argstring)
assert False
except BadUserDataException as e:
assert_equals(str(e), str(parse_argstring.side_effect))
parse_argstring = MagicMock(side_effect=ValueError('AN UNKNOWN ERROR HAPPENED'))
try:
parse_argstring_or_throw(MagicMock(), MagicMock(), parse_argstring=parse_argstring)
assert False
except ValueError as e:
assert_is(e, parse_argstring.side_effect)
def test_records_to_dataframe_missing_value_first():
result = """{"z":100, "y":50}
{"z":25, "nullv":1.0, "y":10}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, True)
expected = | pd.DataFrame([{'z': 100, "nullv": None, 'y': 50}, {'z':25, "nullv":1, 'y':10}], columns=['z', "nullv", 'y']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
pytests for resource extractors
"""
import numpy as np
import os
import pandas as pd
import pytest
from rex.resource_extraction import (MultiFileWindX, MultiFileNSRDBX,
NSRDBX, WindX)
from rex import TESTDATADIR
@pytest.fixture
def NSRDBX_cls():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')
return NSRDBX(path)
@pytest.fixture
def MultiFileNSRDBX_cls():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb', 'nsrdb*2018.h5')
return MultiFileNSRDBX(path)
@pytest.fixture
def WindX_cls():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
return WindX(path)
@pytest.fixture
def MultiFileWindX_cls():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk', 'wtk*m.h5')
return MultiFileWindX(path)
def check_props(res_cls):
"""
Test extraction class properties
"""
meta = res_cls['meta']
assert np.all(np.in1d(res_cls.countries, meta['country'].unique()))
assert np.all(np.in1d(res_cls.states, meta['state'].unique()))
assert np.all(np.in1d(res_cls.counties, meta['county'].unique()))
def extract_site(res_cls, ds_name):
"""
Run tests extracting a single site
"""
time_index = res_cls['time_index']
meta = res_cls['meta']
site = np.random.choice(len(meta), 1)[0]
lat_lon = meta.loc[site, ['latitude', 'longitude']].values
truth_ts = res_cls[ds_name, :, site]
truth_df = | pd.DataFrame({ds_name: truth_ts}, index=time_index) | pandas.DataFrame |
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = | date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC") | pandas.date_range |
#!/usr/bin/env python
#import standard libraries
import obspy.imaging.beachball
import datetime
import os
import sys
import argparse
import csv
import pandas as pd
import numpy as np
import fnmatch
from geopy.distance import geodesic
from math import *
#from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import path
class NewFile:
'''Creates a file object with associated uncertainty and event type'''
def __init__(self, filename, unc, event_type, source):
self.filename = filename
self.event_type = event_type
self.unc = unc
self.name = source
def maketime(timestring):
'''Used in argument parser below. Makes a datetime object from a timestring.'''
TIMEFMT = '%Y-%m-%dT%H:%M:%S'
DATEFMT = '%Y-%m-%d'
TIMEFMT2 = '%m-%d-%YT%H:%M:%S.%f'
outtime = None
    try:
        outtime = datetime.datetime.strptime(timestring, TIMEFMT)
    except:
        try:
            outtime = datetime.datetime.strptime(timestring, DATEFMT)
        except:
            try:
                outtime = datetime.datetime.strptime(timestring, TIMEFMT2)
            except:
print('Could not parse time or date from %s' % timestring)
print (outtime)
return outtime
def infile(s):
'''Stores filename, event type, and uncertainty where provided from comma separated string.'''
default_uncertainty = 15
try:
infile,unc,etype = s.split(',')
unc = float(unc)
return (infile, unc, etype)
except:
try:
s = s.split(',')
infile, unc, etype = s[0], default_uncertainty, s[1]
return (infile, unc, etype)
except:
raise argparse.ArgumentTypeError('Input file information must be \
given as infile,unc,etype or as infile,etype')
def datelinecross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a positive longitude. Stays the same if the input was positive,
is changed to positive if the input was negative '''
if x<0:
return x+360
else:
return x
###############################################
### 9 ###
###############################################
## Written GLM
def meridiancross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x>180:
return x-360
else:
return x
def northcross(x):
    ''' Arguments: x - azimuth value
    Returns: x - the azimuth shifted into the 90/450 domain (adds 360 when x < 90) '''
if x<90:
return x+360
else:
return x
def unnorthcross(x):
    ''' Arguments: x - azimuth value
    Returns: x - the azimuth shifted back into the 0/360 domain (subtracts 360 when x > 360) '''
if x>360:
return x-360
else:
return x
def zerothreesixty(data):
data['lon']=data.apply(lambda row: datelinecross(row['lon']),axis=1)
return data
def oneeighty(data):
data['lon']=data.apply(lambda row: meridiancross(row['lon']),axis=1)
return data
def northernaz(data):
data['az']=data.apply(lambda row: northcross(row['az']),axis=1)
return data
def notnorthanymore(data):
data['az']=data.apply(lambda row: unnorthcross(row['az']),axis=1)
return data
def writetofile(input_file, output_file, event_type, uncertainty, args, catalogs, file_no, seismo_thick, slabname, name):
''' Writes an input file object to the given output file.
Acquires the necessary columns from the file, calculates moment tensor information.
Eliminates rows of data that do not fall within the specified bounds
(date, magnitude, & location).
If the event type is an earthquake, the catalog is compared to all previously
entered catalogs. Duplicate events are removed from the subsequent entries
(prioritization is determined by the order in which catalogs are entered).
Writes filtered dataframe to output file and prints progress to console.
Arguments: input_file - input file from input or slab2database
output_file - file where new dataset will be written
event_type - two letter ID that indicates the type of data (AS, EQ, BA, etc)
uncertainty - the default uncertainty associated with this file or event type
args - arguments provided from command line (bounds, magnitude limits, etc)
catalogs - a list of EQ catalogs that are being written to this file
file_no - file number, used for making event IDs '''
in_file = open(input_file)
fcsv = (input_file[:-4]+'.csv')
# Reading .csv file into dataframe - all files must be in .csv format
try:
if input_file.endswith('.csv'):
data = pd.read_csv(input_file, low_memory=False)
else:
print ('Input file %s was not written to file. MUST BE IN .CSV FORMAT' % input_file)
pass
except:
print ('Could not read file %s. A header line of column labels \
followed by a deliminated dataset is expected. Check file format to ensure this \
is such. All files must be in .csv format.' % input_file)
if 'ID' in data.columns:
pass
elif 'id_no' in data.columns:
data['ID'] = data['id_no'].values
else:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['ID'] = ID
data = makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname)
data = inbounds(args, data, slabname)
#If option is chosen at command line, removes duplicate entries for the same event
#alternate preference for global or regional catalogues depending upon input arguments
try:
regional_pref
except NameError:
pass
else:
try:
tup = (data, fcsv)
if len(catalogs) > 0:
for idx, row in enumerate(catalogs):
if fnmatch.fnmatch(row, '*global*'):
position = idx
name_of_file = row
if regional_pref == 0 and position != 0:
first_file = catalogs[0]
catalogs[position] = first_file
catalogs[0] = name_of_file
elif regional_pref == 1 and position != (len(catalogs)-1):
last_file = catalogs[(len(catalogs)-1)]
                    catalogs[position] = last_file
catalogs[(len(catalogs)-1)] = name_of_file
else:
pass
for cat in catalogs:
data = rid_matches(cat[0], data, cat[1], fcsv)
elif len(catalogs) == 0:
catalogs.append(tup)
except:
print ('If file contains earthquake information (event-type = EQ), \
required columns include: lat,lon,depth,mag,time. The columns of the current \
file: %s. Check file format to ensure these columns are present and properly \
labeled.' % data.columns)
#MF 8.9.16 add source to output file
try:
listints = data['ID'].values.astype(int)
except:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['id_no'] = data['ID'].values
data['ID'] = ID
data['src'] = name
write_data(data, output_file)
print ('The file: %s was written to %s' % (input_file, output_file))
print ('---------------------------------------------------------------------------------')
def castfloats(data):
'''Casts all numerical and nan values to floats to avoid error in calculations'''
data[['lat']] = data[['lat']].astype(float)
data[['lon']] = data[['lon']].astype(float)
data[['depth']] = data[['depth']].astype(float)
data[['unc']] = data[['unc']].astype(float)
if 'mag' in data.columns:
data[['mag']] = data[['mag']].astype(float)
if 'mrr' in data.columns:
data[['mrr']] = data[['mrr']].astype(float)
data[['mtt']] = data[['mtt']].astype(float)
data[['mpp']] = data[['mpp']].astype(float)
data[['mrt']] = data[['mrt']].astype(float)
data[['mrp']] = data[['mrp']].astype(float)
data[['mtp']] = data[['mtp']].astype(float)
if 'Paz' in data.columns and 'Ppl' in data.columns:
data[['Paz']] = data[['Paz']].astype(float)
data[['Ppl']] = data[['Ppl']].astype(float)
data[['Taz']] = data[['Taz']].astype(float)
data[['Tpl']] = data[['Tpl']].astype(float)
data[['S1']] = data[['S1']].astype(float)
data[['D1']] = data[['D1']].astype(float)
data[['R1']] = data[['R1']].astype(float)
data[['S2']] = data[['S2']].astype(float)
data[['D2']] = data[['D2']].astype(float)
data[['R2']] = data[['R2']].astype(float)
return data
def rid_nans(df):
'''Removes points where lat,lon,depth, or uncertainty values are not provided.'''
df = df[np.isfinite(df['lat'])]
df = df[np.isfinite(df['lon'])]
df = df[np.isfinite(df['depth'])]
df = df[np.isfinite(df['unc'])]
return df
def write_data(df, output_file):
''' Arguments: df - filtered dataframe to be written to file
output_file - output file where data is to be written '''
# If file name does not exist, creates file and writes filtered dataframe to it
df = castfloats(df)
df = rid_nans(df)
if not os.path.isfile(output_file):
with open(output_file, 'w') as f:
df.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
# If the output file already exists, new filtered data points are appended to
# existing information
else:
old = pd.read_csv(output_file)
all = pd.concat([old,df],sort=True)
all = castfloats(all)
all = rid_nans(all)
if len(df.columns) > len(old.columns):
all = all[df.columns]
else:
all = all[old.columns]
# Writes desired columns of a filtered dataframe to the output file
with open(output_file, 'w') as f:
all.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
def inbounds(args, data, slab):
''' Originally written by Ginvera, modified by MAF July 2016 '''
''' Arguments: args - input arguments provided from command line arguments
data - dataframe to be filtered based on bounds
Returns: data - filtered dataframe based on bounds '''
# Eliminates data points that are not within specified bounds where provided
if 'time' in data.columns:
try:
data['time'] = pd.to_datetime(data['time'])
except:
try:
data['time'] = pd.to_datetime(data['time'],format='%m-%d-%YT%H:%M:%S')
except:
try:
data['time'] = pd.to_datetime(data['time'],format='%m-%d-%YT%H:%M:%S.%f')
except:
data = data[data.time != '9-14-2012T29:54:59.53']
data = data.reset_index(drop=True)
for index,row in data.iterrows():
print (row['time'])
try:
row['time'] = pd.to_datetime(row['time'],format='%m-%d-%YT%H:%M:%S')
except:
try:
row['time'] = pd.to_datetime(row['time'],format='%m-%d-%YT%H:%M:%S.%f')
except:
print ('this row could not be added, invalid time')
print ('lon,lat,depth,mag,time')
print (row['lon'],row['lat'],row['depth'],row['mag'],row['time'])
data.drop(index, inplace=True)
stime = datetime.datetime(1900,1,1)
etime = datetime.datetime.utcnow()
if args.startTime and args.endTime and args.startTime >= args.endTime:
print ('End time must be greater than start time. Your inputs: Start %s \
End %s' % (args.startTime, args.endTime))
sys.exit(1)
if args.bounds is not None:
lonmin = args.bounds[0]
lonmax = args.bounds[1]
latmin = args.bounds[2]
latmax = args.bounds[3]
minwest = lonmin > 0 and lonmin < 180
maxeast = lonmax < 0 and lonmax > -180
if minwest and maxeast:
data = data[(data.lon >= lonmin) | (data.lon <= lonmax)]
else:
data = data[(data.lon >= lonmin) & (data.lon <= lonmax)]
data = data[(data.lat >= latmin) & (data.lat <= latmax)]
else:
#first filter data within the slab outline (just gets locations though - doesn't filter by rest of info!)
#also, original data was a dataframe
data = getDataInRect(slab,data)
if len(data) > 0:
data_lon = data['lon']
data_lat = data['lat']
data_coords = list(zip(data_lon,data_lat))
indexes_of_bad_data = getDataInPolygon(slab,data_coords)
data_to_keep = data.drop(data.index[indexes_of_bad_data])
data = data_to_keep
else:
return data
if args.startTime is not None and 'time' in data.columns:
stime = args.startTime
data = data[data.time >= stime]
if args.endTime is not None and 'time' in data.columns:
etime = args.endTime
data = data[data.time <= etime]
if args.magRange is not None and 'mag' in data.columns:
magmin = args.magRange[0]
magmax = args.magRange[1]
data = data[(data.mag >= magmin) & (data.mag <= magmax)]
return data
def slabpolygon(slabname):
#####################################
#written by <NAME>, 7/19/2016#
#####################################
'''
inputting the slabname (3 character code) will return the polygon boundaries
'''
#load file with slab polygon boundaries
slabfile = 'library/misc/slab_polygons.txt'
filerows = []
with open(slabfile) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
filerows.append(row)
csvfile.close()
#iterate through list to match the slabname and retrieve coordinates
slabbounds = []
for i in range(len(filerows)):
if slabname == filerows[i][0]:
            slabbounds = filerows[i][1:]
return slabbounds
def determine_polygon_extrema(slabname):
#####################################
#written by <NAME>, 7/18/2016#
#####################################
'''
inputs: slabname to be referenced against stored slab coordinates
outputs: the maximum and minimum latitude and longitude values for the input slab
'''
#calls slabpolygon function to get bounds for this slab region
slabbounds = slabpolygon(slabname)
#slabbbounds come in lon1,lat1,lon2,lat2... format
#even numbers are then longitudes while odds are latitudes
coords = np.size(slabbounds)
#simple even/odd function
def is_odd(num):
return num & 0x1
lons = []
lats = []
for i in range(coords):
val = slabbounds[i]
if is_odd(i):
lats.append(val)
else:
lons.append(val)
x1 = int(min(lons))
x2 = int(max(lons))
y1 = int(min(lats))
y2 = int(max(lats))
return x1,x2,y1,y2
def create_grid_nodes(grd_space,slabname):
#####################################
#written by <NAME>, 7/18/2016#
#####################################
'''
inputs: grid spacing between nodes of regular grid (must be an integer), slab code
outputs: coordinates of each node (corner/intersection) within the regular grid (numpy array)
'''
xmin,xmax,ymin,ymax = determine_polygon_extrema(slabname)
total_degrees_lon = xmax-xmin
total_degrees_lat = ymax-ymin
    #max_iter represents max number of iterations in the x direction (longitude direction)
max_iter = total_degrees_lon/grd_space
#define a grid to divide the area
#accounts for a non-even division
q1, r1 = divmod(total_degrees_lat, grd_space)
q2, r2 = divmod(total_degrees_lon, grd_space)
if r1 > 0:
grid_y = total_degrees_lat/grd_space
else:
grid_y = total_degrees_lat/grd_space + 1
if r2 > 0:
grid_x = total_degrees_lon/grd_space
else:
grid_x = total_degrees_lon/grd_space + 1
#the total number of grids
    boxes = int(grid_y*grid_x)
#initialize array to save time
boundaries = np.zeros([boxes,4])
'''
count keeps track of iterations of longitude
holds latmin/latmax steady while lonmin/lonmax changes across
when max iterations in longitude have completed (gone across area)
the latmin/latmix will adjust and lonmin/lonmax will also be reset.
This process will continue until the number of boxes has been reached.
'''
count = 0
for i in range(boxes):
if count == max_iter-1:
lonmax = xmax + grd_space*count
lonmin = xmin + grd_space*count
count = 0
latmax = ymax
latmin = ymin
boundaries[i,0] = lonmin
boundaries[i,1] = lonmax
boundaries[i,2] = latmin
boundaries[i,3] = latmax
ymax = ymax - grd_space
ymin = ymin - grd_space
else:
lonmax = xmax + grd_space*count
lonmin = xmin + grd_space*count
count = count+1
latmax = ymax
latmin = ymin
boundaries[i,0] = lonmin
boundaries[i,1] = lonmax
boundaries[i,2] = latmin
boundaries[i,3] = latmax
return boundaries
def getDataInPolygon(slabname,data):
#####################################
#written by <NAME>, 7/20/2016#
#####################################
''' creates a grid of 1 or nan based on if they are within a clipping mask or not. DEP.6.29.16 '''
''' modified to fit this script by MAF 7/18/16 '''
### Input:
# slabname: a 3 digit character code identifying a slab region
#data: the input data which may or may not be within the polygon
### Output:
    #rows_to_drop: a list of indices of the rows (coordinate pairs) that fall outside the polygon region
#check if slabbounds are already defined. If not, acquire them
slabbounds = slabpolygon(slabname)
#slabbbounds come in lon1,lat1,lon2,lat2... format
#even numbers are then longitudes while odds are latitudes
coords = np.size(slabbounds)
#simple even/odd function
def is_odd(num):
return num & 0x1
lons = []
lats = []
for i in range(coords):
val = slabbounds[i][1:]
if is_odd(i):
lats.append(val)
else:
lons.append(val)
#create tuple of locations (with zip) to use in contains_points
xy = list(zip(lons,lats))
poly = path.Path(xy)
temp = poly.contains_points(data[:])
mask = np.zeros(len(temp),)*np.nan
mask[temp] = 1
keepers = []
for i in range(len(data)):
points_in_poly = np.dot(mask[i],data[i])
if i > 0:
keepers = np.vstack((keepers,points_in_poly))
else:
keepers = points_in_poly
rows_to_drop = []
for i in range(len(keepers)):
if np.isnan(keepers[i][0]) == True:
rows_to_drop.append(i)
return rows_to_drop
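# For reference, the polygon test above boils down to the boolean mask returned
# by matplotlib's Path.contains_points. The sketch below shows the same idea on
# made-up coordinates; it is an illustration, not a replacement for the
# function above.
def _example_contains_points():
    square = path.Path([(0, 0), (10, 0), (10, 10), (0, 10)])  # hypothetical polygon
    pts = np.array([(5.0, 5.0), (20.0, 1.0)])                 # one inside, one outside
    inside = square.contains_points(pts)                      # array([ True, False])
    return list(np.where(~inside)[0])                         # indices to drop -> [1]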
def getDataInRect(slabname,data1):
#####################################
#written by <NAME>, 7/20/2016#
#####################################
''' creates a grid of 1 or nan based on if they are within a clipping mask or not. DEP.6.29.16 '''
''' modified to fit this script by MAF 7/18/16 '''
### Input:
# slabname: a 3 digit character code identifying a slab region
#data: the input data which may or may not be within the polygon
### Output:
#contained_data: an array of coordinate pairs (lon,lat) that reside within the polygon region
#check if slabbounds are already defined. If not, acquire them
slabbounds = slabpolygon(slabname)
#slabbbounds come in lon1,lat1,lon2,lat2... format
#even numbers are then longitudes while odds are latitudes
coords = np.size(slabbounds)
#simple even/odd function
def is_odd(num):
return num & 0x1
lons = []
lats = []
for i in range(coords):
val = slabbounds[i][1:]
try:
val = float(val)
except:
break
if is_odd(i):
lats.append(val)
else:
lons.append(val)
lonmin = min(lons)
lonmax = max(lons)
latmin = min(lats)
latmax = max(lats)
if lonmin < 0 and lonmax < 0:
data1 = oneeighty(data1)
else:
data1 = zerothreesixty(data1)
data1 = data1[(data1.lon > lonmin) & (data1.lon < lonmax) &(data1.lat > latmin) &(data1.lat < latmax)]
return data1
def cmtfilter(data,seismo_thick):
''' Arguments: data - data with all shallow/nonshallow and thrust/nonthrust earthquake
Returns: filtered - fitered dataframe which DEPENDS ON WHAT YOU DO/DONT COMMENT OUT
(1) filters only shallow earthquakes that have MT criteria which are non thrust
all other shallow earthquakes WITHOUT MT info are NOT filtered
OR
(2) filters ALL shallow earthquakes UNLESS they have MT info and that
MT info has the criteria of a thrust event. '''
# Removes non-thrust events from depths shallower than seismogenic zone
deep_data = data[data.depth >= seismo_thick]
# Includes shallow data without MT info (1) - comment out next two lines for (2)
dfn = data[np.isnan(data['Paz'])]
    dfn = dfn[dfn.depth < seismo_thick]
data = data[np.isfinite(data['Paz'])]
shallow_data = data[data.depth < seismo_thick]
# Depending on which MT info are provided, filters non-thrust, shallow events
if 'Ndip' in shallow_data.columns:
thrust_rake = (shallow_data.Tpl>50) & (shallow_data.Ndip<=30)
else:
thrust_rake = ((shallow_data.R1>30) & (shallow_data.R2>30)
& (shallow_data.R1<150) & (shallow_data.R2<150))
shallow_data = shallow_data[thrust_rake]
# Includes shallow data without MT info (1) - comment out next line for (2)
filtered = pd.concat([deep_data, shallow_data, dfn],sort=True)
# Only includes shallow thrust events (2) - uncomment line below for (2) and comment necessary lines above
# filtered = pd.concat([deep_data, shallow_data],sort=True)
# Rearranges columns / filters out unecessary columns
filtered=filtered[['lat','lon','depth','unc','ID','etype','mag','time',
'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
return filtered
def make_moment_tensor(mrr,mtt,mpp,mrt,mrp,mtp): #r,t,p = x,y,z
'''Used in m_to_planes below. Makes a moment tensor object from moment tensor components'''
return obspy.imaging.beachball.MomentTensor(mrr,mtt,mpp,mrt,mrp,mtp,1)
def m_to_planes(mrr,mtt,mpp,mrt,mrp,mtp,n):
'''Takes a moment tensor and calculates the P, N, and T axes and nodal plane information.
Used in moment_calc below. Returns one of these values as specified by input (n).
The integer input specifies which index of the array of outputs to return. '''
mt = make_moment_tensor(mrr,mtt,mpp,mrt,mrp,mtp)
#axes = obspy.imaging.beachball.MT2Axes(mt) #returns T, N, P
#fplane = obspy.imaging.beachball.MT2Plane(mt)#returns strike, dip, rake
#aplane = obspy.imaging.beachball.AuxPlane(fplane.strike, fplane.dip, fplane.rake)
#MAF changed because functions use lowercase, and aux_plane name includes underscore
axes = obspy.imaging.beachball.mt2axes(mt) #returns T, N, P
fplane = obspy.imaging.beachball.mt2plane(mt)#returns strike, dip, rake
aplane = obspy.imaging.beachball.aux_plane(fplane.strike, fplane.dip, fplane.rake)
Tstrike = axes[0].strike
Tdip = axes[0].dip
Pstrike = axes[2].strike
Pdip = axes[2].dip
S1 = fplane.strike
D1 = fplane.dip
R1 = fplane.rake
S2 = aplane[0]
D2 = aplane[1]
R2 = aplane[2]
mplanes = [Pstrike,Pdip,Tstrike,Tdip,S1,D1,R1,S2,D2,R2]
return mplanes[n]
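# Usage sketch for m_to_planes: looping over the output index n recovers the
# P/T axes and both nodal planes for a single event. The tensor components
# below are arbitrary illustrative numbers, not a real catalog entry.
def _example_m_to_planes():
    mrr, mtt, mpp, mrt, mrp, mtp = 1.0, -0.5, -0.5, 0.2, 0.1, 0.0  # hypothetical MT
    labels = ['Paz', 'Ppl', 'Taz', 'Tpl', 'S1', 'D1', 'R1', 'S2', 'D2', 'R2']
    return {lab: m_to_planes(mrr, mtt, mpp, mrt, mrp, mtp, n)
            for n, lab in enumerate(labels)}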
def moment_calc(df, args, seismo_thick,slabname):
''' Creates and appends columns with Principal Axis and Nodal Plane information.
Used in makeframe below. Takes moment tensor information from input dataframe
columns and creates 11 new columns with information used to distinguish between thrust
and non-thrust earthquakes.
Arguments: df - dataframe with mt information in the form mrr,mtt,mpp,mrt,mrp,mtp
args - input arguments provided from command line arguments
Returns: df - dataframe with mt information in the form Paz,Ppl,Taz,Tpl,S1,D1,R1,S2,D2,R2
'''
#try:
# Only calculates MT info where it exists in EQ datasets
df = inbounds(args, df, slabname)
dfm = df[np.isfinite(df['mrr'])]
dfn = df[df['mrr'].isnull()]
#except:
# raise Exception,'If file contains earthquake information (event-type = EQ), \
# required columns include: lat,lon,depth,mag,time. The columns of the current \
# file: %s. Check file format to ensure these columns are present and properly \
# labeled.' % df.columns
# Calculates each new column of MT info
try:
dfm['Paz']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],0),axis=1)
dfm['Ppl']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],1),axis=1)
dfm['Taz']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],2),axis=1)
dfm['Tpl']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],3),axis=1)
dfm['S1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],4),axis=1)
dfm['D1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],5),axis=1)
dfm['R1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],6),axis=1)
dfm['S2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],7),axis=1)
dfm['D2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],8),axis=1)
dfm['R2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],9),axis=1)
# Concatenates events with and without MT info
#dfm = cmtfilter(dfm,seismo_thick)
df = pd.concat([dfm,dfn],sort=True)
# Rearranges columns and returns
if 'mlon' in df.columns:
df = df[['lat','lon','depth','unc','ID','etype','mag','time',
'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
else:
df = df[['lat','lon','depth','unc','ID','etype','mag','time',
'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2']]
df['mlon'] = df['lon'].values*1.0
df['mlat'] = df['lat'].values*1.0
df['mdep'] = df['depth'].values*1.0
return df
except:
# if exception is caught, try to return only events without MT info
try:
if len(dfm) == 0:
return dfn
except:
print('Where moment tensor information is available, columns \
must be labeled: mrr,mpp,mtt,mrp,mrt,mtp')
def ymdhmsparse(input_file):
'''Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns.
Used in makeframe below. Returns a new dataframe with parsed datetimes. '''
ymdhms = {'time':['year','month','day','hour','min','sec']}
    dparse = lambda x: datetime.datetime.strptime(x, '%Y %m %d %H %M %S')
cols = ['year','month','day','hour','min','sec','lat','lon','depth','mag']
data = pd.read_csv(input_file, parse_dates=ymdhms, usecols=cols, date_parser=dparse)
return data
def raiseUnc(x):
''' Raises unreasonably low uncertainties for earthquakes to a value greater
than that of average active source data points (which is 5 km). '''
if x < 6:
return 6
else:
return x
def makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname):
''' Arguments: data - semi-filtered data frame to be filtered more and written to file
fcsv - filename of output file
event_type - kind of data i.e. BA, EQ, ER, TO etc
uncertainty - unc value provided in command line or set by default for etype
args - input arguments provided from command line arguments
Returns: data - fully filtered dataset to be written to output file '''
# Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns
if 'year' in data.columns and 'sec' in data.columns and 'mag' in data.columns:
data = ymdhmsparse(fcsv)
# If ISC-GEM data is provided, high quality, low uncertainties are included in place of
# the default values assigned in s2d.py main method.
    if 'unc' in data.columns and 'uq' in data.columns:
try:
data = data[(data.uq != 'C') & (data.unc < uncertainty)]
except:
print ('When adding a file with uncertainty quality, the column \
representing that quality must be labeled as uq')
# uses OG uncertainties where provided. Raises them if they are unreasonably low
elif 'unc' in data.columns:
uncert = data['unc'].values
try:
if isnan(uncert[1]):
data['unc'] = uncertainty
elif event_type == 'EQ':
data['unc'] = data.apply(lambda row: raiseUnc(row['unc']),axis=1)
else:
pass
except:
data['unc'] = uncertainty
# If no uncertainty column is included, the one provided in command line arguments is
# used to add a new column to the data, alternatively, the default value assigned in s2d.py is used
else:
data['unc'] = uncertainty
pd.options.mode.chained_assignment = None
# A new column marking the event type is added to the data. Everything is cast as a float
data['etype'] = event_type
data = castfloats(data)
# Calculates moment tensor info where applicable and removes shallow, non-thrust events
if 'mrr' in data.columns:
data = moment_calc(data, args, seismo_thick,slabname)
elif 'time' in data.columns and 'mag' in data.columns:
data = data[['lat','lon','depth','unc','ID','etype','mag','time']]
else:
pass
return data
##########################################################################################################
#The following serves to create a rough plot of the data types compiled with s2d.py.
##########################################################################################################
def plot_map(lons, lats, c, legend_label, projection='mill',
llcrnrlat=-80, urcrnrlat=90, llcrnrlon=-180, urcrnrlon=180, resolution='i'):
''' Optional Arguments: projection - map projection, default set as 'mill'
llcrnrlat - lower left corner latitude value, default is -80
urcrnrlat - upper right corner latitude value, default is 90
llcrnrlon - lower left corner longitude value, default is -180
urcrnrlon - upper right corner longitude value, default is 180
resolution - the resolution of the plot, default is 'i'
Required Arguments: lons - list of longitude values to be plotted
lats - list of latitude values to be plotted
c - the color of the points to be plotted
legend_label - how this set of points will be labeled on the legend
Returns: m - a basemap object defined by input bounds with input points included '''
# Creates a basic plot of a series of lat,lon points over a defined region
m = Basemap(projection=projection, llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, resolution=resolution)
m.drawcoastlines()
m.drawmapboundary()
m.drawcountries()
m.etopo()
m.drawmeridians(np.arange(llcrnrlon, urcrnrlon, 5), labels=[0,0,0,1], fontsize=10)
m.drawparallels(np.arange(llcrnrlat, urcrnrlat, 5), labels=[1,0,0,0], fontsize=10)
x,y = m(lons, lats)
m.scatter(x, y, color=c, label=legend_label, marker='o', edgecolor='none', s=10)
return m
def datelinecross(x):
'''Converts negative longitudes to their positive equivalent for the sake of plotting.'''
if x<0:
return x+360
else:
return x
##############################################################################################
#Everything below this point serves the purpose of identifying and
#eliminating duplicate events between multiple earthquake catalog entries.
##############################################################################################
class Earthquake:
'''Creates an earthquake object from which event information can be extracted'''
def __init__(self,time,coords,depth,lat,lon,mag,catalog):
self.time = time
self.coords = coords
self.depth = depth
self.lat = lat
self.lon = lon
self.mag = mag
self.catalog = catalog
def getvals(row):
'''Gathers time, lat, lon, depth, mag, information from row in dataframe.'''
time = row['time']
lat = row['lat']
lon = row['lon']
depth = row['depth']
mag = row['mag']
ep = (lat,lon)
return time,ep,depth,lat,lon,mag
def boundtrim(cat1, cat2):
''' Arguments: cat1 - an earthquake catalog to be compared with cat2
cat2 - an earthquake catalog to be compared to cat1
Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across bounds
where they both exist. Reduces processing time
'''
# Trims two earthquake catalogs to fit over the same region
lonmin1, lonmin2 = cat1['lon'].min(), cat2['lon'].min()
latmin1, latmin2 = cat1['lat'].min(), cat2['lat'].min()
lonmax1, lonmax2 = cat1['lon'].max(), cat2['lon'].max()
latmax1, latmax2 = cat1['lat'].max(), cat2['lat'].max()
minwest = (lonmax1 > 0 and lonmax1 < 180) or (lonmax2 > 0 and lonmax2 < 180)
maxeast = (lonmin1 < 0 and lonmin1 > -180) or (lonmin2 < 0 and lonmin2 > -180)
difference = abs(lonmin1-lonmax1)>180 or abs(lonmin2-lonmax2)>180
if minwest and maxeast and difference:
pass
else:
cat1 = cat1[(cat1.lon >= lonmin2) & (cat1.lon <= lonmax2)]
cat2 = cat2[(cat2.lon >= lonmin1) & (cat2.lon <= lonmax1)]
cat1 = cat1[(cat1.lat >= latmin2) & (cat1.lat <= latmax2)]
cat2 = cat2[(cat2.lat >= latmin1) & (cat2.lat <= latmax1)]
return cat1, cat2
def timetrim(cat1, cat2):
''' Arguments: cat1 - an earthquake catalog to be compared with cat2
cat2 - an earthquake catalog to be compared to cat1
Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across time
frames where they both exist. Reduces processing time
'''
# Trims two earthquake catalogs to fit over the same time range
cat1['time'] = pd.to_datetime(cat1['time'])
cat2['time'] = pd.to_datetime(cat2['time'])
cat1min, cat1max = cat1['time'].min(), cat1['time'].max()
cat2min, cat2max = cat2['time'].min(), cat2['time'].max()
cat1 = cat1[(cat1.time >= cat2min) & (cat1.time <= cat2max)]
cat2 = cat2[(cat2.time >= cat1min) & (cat2.time <= cat1max)]
return cat1, cat2
def earthquake_string(eqo):
''' Puts earthquake information into a string to be written or printed
Arguments: eqo - earthquake object
Returns: eqos - a string of information stored in earthquake object input argument '''
eqos = (str(eqo.lat) + ',' + str(eqo.lon) + ',' + str(eqo.depth) + ','
+ str(eqo.mag) + ',' + str(eqo.time) + ',' + eqo.catalog)
return eqos
def find_closest(eqo, eqm1, eqm2):
'''Determines which of two potential matches in one catalog is closer to an event in another.
Arguments: eqo - earthquake event in first catalog that matches two events in the second
eqm1 - the first event in the second catalog that matches eqo
eqm2 - the second event in the second catalog that matches eqo
Returns: closest - the closest event weighting time first, then distance, then magnitude '''
# Prints information to console to make user aware of more than one match
    print ('-------------------------------------- lat,lon,depth,mag,time,catalog')
print ('There is more than one match for event: %s' % earthquake_string(eqo))
print ('event1: %s' % earthquake_string(eqm1))
print ('event2: %s' % earthquake_string(eqm2))
# Gets distance between either event and the common match eqo
darc1 = geodesic(eqo.coords, eqm1.coords).meters/1000
darc2 = geodesic(eqo.coords, eqm2.coords).meters/1000
dh1 = abs(eqo.depth - eqm1.depth)
dh2 = abs(eqo.depth - eqm2.depth)
dist1 = sqrt(darc1*darc1 + dh1*dh1)
dist2 = sqrt(darc2*darc2 + dh2*dh2)
# Gets magnitude and time differences between each event and the common match
dtime1 = abs(eqo.time - eqm1.time)
dtime2 = abs(eqo.time - eqm2.time)
dmag1 = abs(eqo.mag - eqm1.mag)
dmag2 = abs(eqo.mag - eqm2.mag)
# Finds the closest match to eqo by checking time first, then distance, then magnitude
if dtime1 < dtime2:
closest = eqm1
elif dtime2 < dtime1:
closest = eqm2
elif dtime1 == dtime2 and dist1 < dist2:
closest = eqm1
    elif dtime1 == dtime2 and dist2 < dist1:
        closest = eqm2
    elif dtime1 == dtime2 and dist1 == dist2 and dmag1 < dmag2:
        closest = eqm1
    elif dtime1 == dtime2 and dist1 == dist2 and dmag2 < dmag1:
closest = eqm2
# If all things are equal, the first event is chosen as a match by default
else:
print ('The two events are equidistant to the match in time, space, and magnitude.\
The second event was therefore determined independent.')
closest = eqm1
return closest
print ('>>>>closest event: %s' % earthquake_string(closest))
return closest
def removematches(dfo, dfm):
'''Eliminates events in dfo (dataframe) that are found in dfm (dataframe) '''
ind = (dfo.time.isin(dfm.time) & dfo.lat.isin(dfm.lat) & dfo.lon.isin(dfm.lon)
& dfo.mag.isin(dfm.mag) & dfo.depth.isin(dfm.depth))
dfo = dfo[~ind]
return dfo
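# Hedged sketch (illustrative, not part of the original module): removematches drops
# any row of dfo whose time, lat, lon, mag, and depth values all appear in dfm.
# Toy frames with the same column names as above:
def _removematches_example():
    dfo = pd.DataFrame({'time': [1, 2], 'lat': [10.0, 11.0], 'lon': [20.0, 21.0],
                        'mag': [4.0, 5.0], 'depth': [5.0, 10.0]})
    dfm = dfo.iloc[[0]]
    # only the second row of dfo survives
    return removematches(dfo, dfm)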
def rid_matches(cat1, cat2, name1, name2):
''' Compares two catalogs, identifies and removes matching events from cat2.
Arguments: cat1 - the first catalog (dataframe), no events are removed from this catalog
cat2 - the second catalog (dataframe), events in this catalog that are close
in space, time, and magnitude to those in cat1 are filtered out
                   name1 - the name of the first catalog, used for printing/bookkeeping purposes
                   name2 - the name of the second catalog, used for printing/bookkeeping purposes
Returns: df - a filtered version of cat2 without events that match those in cat1 '''
# Setting constants that define matching criteria
tdelta = 30
distdelta = 100
magdelta = 0.5
    # Ensuring that all times are in datetime object format & trimming catalogs to only extend
    # across the bounds and time constraints of the other
    cat1['time'] = pd.to_datetime(cat1['time'])
import pandas as pd
from dplypy.dplyframe import DplyFrame
def test_init():
pandas_df = pd.DataFrame(
data={
"col1": [0, 1, 2, 3],
"col2": [3, 4, 5, 6],
"col3": [6, 7, 8, 9],
"col4": [9, 10, 11, 12],
}
)
    # Wrapping a pandas DataFrame in DplyFrame should keep the underlying data intact
    df1 = DplyFrame(pandas_df)
    pd.testing.assert_frame_equal(df1.pandas_df, pandas_df)
# %%
from pathlib import Path
import sys
import os
from sklearn.preprocessing import binarize
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
from sklearn import linear_model
from sklearn import model_selection, preprocessing
import seaborn as sns
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np # linear algebra
from scipy import interp
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn import svm, datasets
from itertools import cycle
import matplotlib.pyplot as plt
# %%
print(__doc__)
# %%
# import xgboost as xgb
color = sns.color_palette()
Number_Monolayers = 6138
# %%
data_path = Path('./data/WR/')
# %%
# Create directory
# Create target Directory if don't exist
if not os.path.exists(data_path):
os.mkdir(data_path)
os.mkdir(data_path / 'DATA_SETS/')
os.mkdir(data_path / 'Figs/')
os.mkdir(data_path / 'SavedModels/')
os.mkdir(data_path / 'LASSO_Converged/')
print("Directory ", data_path, " Created ")
else:
print("Directory ", data_path, " already exists")
# %matplotlib inline
# %%
pd.options.mode.chained_assignment = None # default='warn'
pd.set_option('display.max_columns', 3000)
# %%
filename = data_path / \
("1l_atomicPLMF_" + str(Number_Monolayers) + "structures.csv")
# %%
if filename.exists() is False:
print(f"file does not exist: {filename} ")
downloadurl = "https://www.dropbox.com/sh/b1xjnrwyvnxvufy/AACQd3nAgPV3ASVq4EFlrhwra/" \
"LASSO_BR2_1?dl=0&preview=1l_atomicPLMF_6138structures.csv"
print(f"use curl to download {downloadurl}")
else:
print(f"file exists: {filename}")
# %%
pd.read_table(filename, nrows=1000, low_memory=False, header=None, sep='#')
# %%
skipcount = 100
with open(filename, mode='r') as f1:
i = 0
lines = []
for line in f1:
if (i % skipcount == 0):
lines.append(line)
i += 1
# %%
# read file with monolayers names and descriptors
monolayer_descriptors = pd.read_csv(filename, header=0)
titles = pd.read_csv(filename, header=None)
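# %%
# Hedged sketch (illustrative, not the original notebook's next cell): a typical
# LassoCV fit on the descriptor table. The target column name 'target' and the use
# of all remaining numeric columns as features are assumptions, not taken from the data.
def lasso_sketch(df, target_col='target'):
    X = df.drop(columns=[target_col]).select_dtypes(include=[np.number])
    y = df[target_col]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=0)
    model = LassoCV(cv=KFold(n_splits=5, shuffle=True, random_state=0))
    model.fit(X_train, y_train)
    return r2_score(y_test, model.predict(X_test))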
# -*- coding: utf-8 -*-
"""Tests and Confidence Intervals for Binomial Proportions
Created on Fri Mar 01 00:23:07 2013
Author: <NAME>
License: BSD-3
"""
from statsmodels.compat.python import lzip
import numpy as np
from scipy import stats, optimize
from sys import float_info
from statsmodels.stats.base import AllPairsResults
from statsmodels.tools.sm_exceptions import HypothesisTestWarning
from statsmodels.stats.weightstats import _zstat_generic2
from statsmodels.stats.base import HolderTuple
from statsmodels.tools.testing import Holder
def proportion_confint(count, nobs, alpha=0.05, method='normal'):
'''confidence interval for a binomial proportion
Parameters
----------
    count : int or array_like
number of successes, can be pandas Series or DataFrame
nobs : int
total number of trials
alpha : float in (0, 1)
significance level, default 0.05
method : {'normal', 'agresti_coull', 'beta', 'wilson', 'binom_test'}
default: 'normal'
method to use for confidence interval,
currently available methods :
- `normal` : asymptotic normal approximation
- `agresti_coull` : Agresti-Coull interval
- `beta` : Clopper-Pearson interval based on Beta distribution
- `wilson` : Wilson Score interval
- `jeffreys` : Jeffreys Bayesian Interval
- `binom_test` : experimental, inversion of binom_test
Returns
-------
ci_low, ci_upp : float, ndarray, or pandas Series or DataFrame
lower and upper confidence level with coverage (approximately) 1-alpha.
When a pandas object is returned, then the index is taken from the
`count`.
Notes
-----
Beta, the Clopper-Pearson exact interval has coverage at least 1-alpha,
but is in general conservative. Most of the other methods have average
coverage equal to 1-alpha, but will have smaller coverage in some cases.
The 'beta' and 'jeffreys' interval are central, they use alpha/2 in each
tail, and alpha is not adjusted at the boundaries. In the extreme case
when `count` is zero or equal to `nobs`, then the coverage will be only
1 - alpha/2 in the case of 'beta'.
The confidence intervals are clipped to be in the [0, 1] interval in the
case of 'normal' and 'agresti_coull'.
Method "binom_test" directly inverts the binomial test in scipy.stats.
which has discrete steps.
TODO: binom_test intervals raise an exception in small samples if one
interval bound is close to zero or one.
References
----------
https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
<NAME>.; <NAME>; <NAME> (2001). "Interval
Estimation for a Binomial Proportion",
Statistical Science 16 (2): 101–133. doi:10.1214/ss/1009213286.
TODO: Is this the correct one ?
'''
pd_index = getattr(count, 'index', None)
if pd_index is not None and callable(pd_index):
# this rules out lists, lists have an index method
pd_index = None
count = np.asarray(count)
nobs = np.asarray(nobs)
q_ = count * 1. / nobs
alpha_2 = 0.5 * alpha
if method == 'normal':
std_ = np.sqrt(q_ * (1 - q_) / nobs)
dist = stats.norm.isf(alpha / 2.) * std_
ci_low = q_ - dist
ci_upp = q_ + dist
elif method == 'binom_test':
# inverting the binomial test
def func(qi):
return stats.binom_test(q_ * nobs, nobs, p=qi) - alpha
if count == 0:
ci_low = 0
else:
ci_low = optimize.brentq(func, float_info.min, q_)
if count == nobs:
ci_upp = 1
else:
ci_upp = optimize.brentq(func, q_, 1. - float_info.epsilon)
elif method == 'beta':
ci_low = stats.beta.ppf(alpha_2, count, nobs - count + 1)
ci_upp = stats.beta.isf(alpha_2, count + 1, nobs - count)
if np.ndim(ci_low) > 0:
ci_low[q_ == 0] = 0
ci_upp[q_ == 1] = 1
else:
ci_low = ci_low if (q_ != 0) else 0
ci_upp = ci_upp if (q_ != 1) else 1
elif method == 'agresti_coull':
crit = stats.norm.isf(alpha / 2.)
nobs_c = nobs + crit**2
q_c = (count + crit**2 / 2.) / nobs_c
std_c = np.sqrt(q_c * (1. - q_c) / nobs_c)
dist = crit * std_c
ci_low = q_c - dist
ci_upp = q_c + dist
elif method == 'wilson':
crit = stats.norm.isf(alpha / 2.)
crit2 = crit**2
denom = 1 + crit2 / nobs
center = (q_ + crit2 / (2 * nobs)) / denom
dist = crit * np.sqrt(q_ * (1. - q_) / nobs + crit2 / (4. * nobs**2))
dist /= denom
ci_low = center - dist
ci_upp = center + dist
# method adjusted to be more forgiving of misspellings or incorrect option name
elif method[:4] == 'jeff':
ci_low, ci_upp = stats.beta.interval(1 - alpha, count + 0.5,
nobs - count + 0.5)
else:
raise NotImplementedError('method "%s" is not available' % method)
if method in ['normal', 'agresti_coull']:
ci_low = np.clip(ci_low, 0, 1)
ci_upp = np.clip(ci_upp, 0, 1)
if pd_index is not None and np.ndim(ci_low) > 0:
import pandas as pd
if np.ndim(ci_low) == 1:
ci_low = pd.Series(ci_low, index=pd_index)
ci_upp = pd.Series(ci_upp, index=pd_index)
if np.ndim(ci_low) == 2:
ci_low = pd.DataFrame(ci_low, index=pd_index)
            ci_upp = pd.DataFrame(ci_upp, index=pd_index)

    return ci_low, ci_upp
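# Hedged usage sketch (illustrative, not part of statsmodels): a minimal call to
# proportion_confint, assuming 45 successes out of 100 trials.
if __name__ == "__main__":
    ci_low, ci_upp = proportion_confint(45, 100, alpha=0.05, method="wilson")
    print("Wilson 95%% CI: [%.3f, %.3f]" % (ci_low, ci_upp))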
import pandas as pd
import geopandas as gpd
import re, fiona, os, glob
import numpy as np
excel='2040 Project List_Consolidated draft with AQ (ORIGINAL).xlsx'
xl = pd.ExcelFile(excel)
sheetList = xl.sheet_names
# update the sheet list
sheetList = [sheet for sheet in sheetList if sheet not in ['Transit Constrained', 'Transit Illustrative', 'Table Data']]
path = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\ProjectReview'
newPath = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\Data\GISData'
inpath = r'T:\MPO\RTP\FY16 2040 Update\Data\RTP_2040_Data.gdb'
mapPath = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\ProjectReview\RTP_Projects\RTP_Projects.gdb'
newIDs = [488,460,144,382,390,411,470,492,149,353,193,170,173,410,299,216,136] # 488 and 136 are excluded and 492 is a point
tablePatterns = np.array(['Auto Constrained', 'Auto Illustrative', 'Bike Constrained', 'Bike Illustrative'])
layerPatterns = np.array(['Constrained_Roadway', 'Illustrative_Roadway', 'Constrained_BikePed', 'Illustrative_BikePed'])
# Step 4 - 2: add the project 57 and 738
# overwrite the shapefile
def addProj(ID = 57):
if ID == 57:
shp = 'Constrained_Roadway_lines.shp'
# project 57 - Improvements within Jasper-Natron Area
Proj = gpd.read_file(mapPath, layer='Improvements_within_Jasper_Natron_Area')
toMap = getToMap().head(1)
else:
shp = 'Illustrative_BikePed.shp'
# project 738 - Springfield Christian School Channel Path
gdf = gpd.read_file(inpath, layer='Illustrative_BikePed')
Proj = gdf[gdf.NAME == 'SCS Channel Path']
toMap = getToMap().tail(1)
toMap['RTP']= ID
toMap.rename(columns={'RTP': 'RTP_ID'}, inplace=True)
added_gdf = Proj[['RTP_ID', 'geometry']].merge(toMap, on='RTP_ID')
shortenColnames(added_gdf)
newgdf = gpd.read_file(os.path.join(newPath, 'Updated', shp))
commonCols = [col for col in newgdf.columns if col in added_gdf.columns]
updatedgdf = newgdf[commonCols].append(added_gdf[commonCols])
updatedgdf.to_file(os.path.join(newPath, 'Updated', shp))
# Step 4 - 1: add newly mapped projects to the existing projects
def addNewgdf():
Layers = targetLayers()
lineProjs = getNewgdf()[0]
shortenColnames(lineProjs)
pointProjs = getNewgdf()[1]
shortenColnames(pointProjs)
for layer in Layers:
print(layer)
l = layer.split('_')
layerPattern = l[0] + '_' + l[1]
i=np.min(np.where(layerPatterns == layerPattern))
tablePattern = tablePatterns[i]
if 'lines' in layer or 'BikePed' in layer and 'points' not in layer:
toAdd = lineProjs[lineProjs.In == tablePattern + ' ']
if 'points' in layer:
toAdd = pointProjs[pointProjs.In == tablePattern + ' ']
gdf = gpd.read_file(os.path.join(newPath, layer+'.shp'))
cols = [col for col in gdf.columns if col in toAdd.columns]
gdf = gdf[cols].append(toAdd[cols])
#print(gdf.tail())
gdf.to_file(os.path.join(newPath, 'Updated', layer+'.shp'))
print("Added projects {0} to the layer {1}".format(toAdd.Name.values, layer))
# Step 3: add previously dropped duplicated projects in either table with a review in GIS data
# drop duplicated GIS records in this step
# check if these added/duplicated projects are in the existing GIS data
def addOldGISdata():
addedProjects = pd.read_csv(os.path.join(path, 'addedProjects.csv'))
Layers = targetLayers()
for ID in addedProjects.RTP:
tablePattern = re.sub(r"(\w)([A-Z])", r"\1 \2", addedProjects[addedProjects.RTP == ID]['In'].values[0])
i=np.min(np.where(tablePatterns == tablePattern))
layerPattern = layerPatterns[i]
layers = [layer for layer in Layers if re.search(r"^{0}".format(layerPattern), layer)]
for layer in layers:
gdf = gpd.read_file(inpath, layer=layer)
if ID in gdf.RTP_ID.values:
print("Project ID {0} is in the layer {1}".format(ID, layer))
if ID == 828 and layer == 'Constrained_Roadway_points':
print("Pass RTP {0} for the layer {1}".format(ID, layer))
pass
else:
df = addedProjects[addedProjects.RTP == ID]
shortenColnames(df)
added_gdf = gdf[gdf.RTP_ID == ID][['RTP_ID', 'geometry']].merge(df, on='RTP_ID')
newgdf = gpd.read_file(newPath, layer=layer)
newgdf.drop_duplicates(inplace=True, ignore_index=True)
commonCols = [col for col in newgdf.columns if col in added_gdf.columns]
updatedgdf = newgdf[commonCols].append(added_gdf[commonCols])
updatedgdf.to_file(os.path.join(newPath, layer+'.shp'))
else:
print("Project ID {0} is NOT in the layer {1}".format(ID, layer))
# Step 2: match the 2045 GIS data from step 1 and common RTP IDs from tables compared between 2040 and 2045 in the same category
# with a review on the duplicated IDs in the same category
def updateOldGISdata():
path = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\Data'
Layers = targetLayers()
for layer in Layers:
print(layer)
gdf = gpd.read_file(os.path.join(path, layer+'.shp'))
l = layer.split('_')
layerPattern = l[0] + '_' + l[1]
i=np.min(np.where(layerPatterns == layerPattern))
tablePattern = tablePatterns[i]
df = getCombinedTables(cat='common', export=True, byCategory=True, category=tablePattern.replace(' ', ''))[1]
shortenColnames(df)
if layer == 'Constrained_Roadway_lines':
dropInd = df[((df.RTP_ID.isin([924, 333]) & (df.Category == 'New Collectors'))|
((df.RTP_ID == 918) & (df.Category == 'Study'))|
((df.RTP_ID == 828) & (df.Category == 'Arterial Capacity Improvements'))|
((df.RTP_ID == 32) & (df.Category == 'Arterial Capacity Improvements')))].index
df.drop(dropInd, inplace = True)
if layer == 'Constrained_Roadway_points':
dropInd = df[(df.RTP_ID.isin([924, 333])) & (df.Category == 'New Collectors')].index
df.drop(dropInd, inplace = True)
newgdf = gdf[['RTP_ID', 'geometry']].merge(df, on='RTP_ID')
newgdf.to_file(os.path.join(path, 'GISData', layer+'.shp'))
# Step 1: match ID between the 2040 GIS data and the 2045 table with common IDs in the same category
# get GIS data for 2045 by selecting the spatial features with common ID
def getOldGISdata():
Layers = targetLayers()
for layer in Layers:
print(layer)
gdf = gpd.read_file(inpath, layer=layer)
l = layer.split('_')
layerPattern = l[0] + '_' + l[1]
i=np.min(np.where(layerPatterns == layerPattern))
tablePattern = tablePatterns[i]
res=getIDs(Tablepattern=tablePattern,
Layerpattern=layerPattern)
commonIDs=matchID(res[0], res[1])[2]
newgdf = gdf[gdf.RTP_ID.isin(commonIDs)]
path = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\Data'
newgdf.to_file(os.path.join(path, layer+'.shp'))
# merge newly mapped projects with new IDs
def getNewgdf():
mappedNewProj = getToMap()
mappedNewProj.RTP = newIDs
lineProj_gdf = gpd.read_file(mapPath, layer='AddedLineProject2045')
lineProj_gdf.RTP_ID = lineProj_gdf.RTP_ID.astype(int)
mappedNewProj.rename(columns={"RTP": "RTP_ID"}, inplace=True)
lineProj_gdf = lineProj_gdf.merge(mappedNewProj, on="RTP_ID")
pointProj_gdf = gpd.read_file(mapPath, layer='AddedPointProject2045')
pointProj_gdf.RTP_ID = pointProj_gdf.RTP_ID.astype(int)
pointProj_gdf = pointProj_gdf.merge(mappedNewProj, on="RTP_ID")
return lineProj_gdf, pointProj_gdf
# get mapped IDs
def getMappedIDs(year=2040):
Layers = targetLayers()
IDs = []
for layer in Layers:
if year == 2040:
gdf = gpd.read_file(inpath, layer=layer)
else:
gdf = gpd.read_file(os.path.join(newPath, 'Updated', layer+'.shp'))
IDs.extend(gdf.RTP_ID.unique())
return set(IDs)
# projects with multiple IDs or without an ID in the tables
def getToMap():
toMap = pd.read_csv(os.path.join(path, 'projects_wo_unique_IDs.csv'))
dropInd = toMap[(toMap.GeographicLimits.isin(['Various Locations', 'Citywide']))|
(toMap.Category == 'Study')|
((toMap.Name == '<NAME>') & (toMap.Category == 'Multi-Use Paths With Road Project'))].index
toMap.drop(dropInd, inplace = True)
return toMap
def reviewIDbyName(layer= 'Constrained_Roadway_lines'):
ngdf = gpd.read_file(os.path.join(newPath, layer+'.shp'))
gdf = gpd.read_file(inpath, layer=layer)
toMap = getToMap()
toMapNames = toMap.Name.unique()
totalN = toMap.shape[0]
mappedNames = []
sel = [name for name in toMapNames if name in gdf.NAME.unique()]
# update ID by comparing the names
with open(os.path.join(path, "review_projects_wo_unique_IDs.txt"), 'a') as f:
#print("-"*100, file=f)
if layer == 'Constrained_Roadway_lines':
print("\n", file=f)
print("There are {0} projects to map, and they are {1}".format(totalN,
toMapNames), file=f)
newN = len(sel)
if newN > 0:
print("\n", file=f)
mappedNames.extend(sel)
print("Found matched project names {0}".format(sel), file=f)
print("\n", file=f)
print(layer, file=f)
print("\n", file=f)
print("The existing GIS data shows:", file=f)
if "lines" in layer:
# review existing GIS data
print(gdf[gdf.NAME.isin(sel)][['RTP_ID', 'NAME', 'LIMITS', 'CATEGORY', 'LENGTH', 'JURISDICTI']], file=f)
elif "points" in layer:
print(gdf[gdf.NAME.isin(sel)][['RTP_ID', 'NAME', 'LIMITS', 'Category', 'JURIS']], file=f)
else:
print(gdf[gdf.NAME.isin(sel)][['RTP_ID', 'NAME', 'LIMITS', 'Category', 'LENGTH', 'JURIS']], file=f)
# compare the projects to-map
print("\n", file=f)
print("The projects to map are:", file=f)
print(toMap[toMap.Name.isin(sel)][[ 'RTP', 'Name', 'GeographicLimits', 'Category', 'Length', 'PrimaryJurisdiction']], file=f)
# are they mapped yet with IDs?
print("\n", file=f)
print("The new GIS data shows:", file=f)
print(ngdf[ngdf.Name.isin(sel)][['RTP_ID', 'Name', 'GeoLimits', 'Category', 'Length', 'PrimJurisd']], file=f)
#for name in sel:
#toMap.loc[toMap.Name == name, 'RTP'] = gdf[gdf.NAME == name]['RTP_ID'].values[0]
commondf = getCombinedTables(cat='common')[0]
commonIDs = sorted(commondf['RTP'].unique())
if len([i for i in gdf[gdf.NAME.isin(sel)].RTP_ID if i in commonIDs]) > 0:
print("\n", file=f)
print("The tables with common IDs show these records:", file=f)
print(commondf[commondf.RTP.isin(gdf[gdf.NAME.isin(sel)].RTP_ID)][['RTP',
'Name40','Name45', 'In',
'Category40',
'Category45',
'GeographicLimits40',
'GeographicLimits45',
'PrimaryJurisdiction40',
'PrimaryJurisdiction45',
'Length40', 'Length45']], file=f)
return mappedNames, newN
def reviewIDinGIS(layer= 'Constrained_Roadway_lines', IDs=[924, 333]):
gdf = gpd.read_file(r'T:\MPO\RTP\FY16 2040 Update\Data\RTP_2040_Data.gdb', layer=layer)
return gdf[gdf.RTP_ID.isin(IDs)]
# get information for ID review
def reviewIDforGIS(year=2040, export=False):
df = pd.read_csv(os.path.join(path, str(year)+'repeatedRTPID.csv'))
df['Review'] = df[['Table1', 'Table2']].apply(lambda row: review(row.Table1, row.Table2)[0], axis = 1)
df['In'] = df[['Table1', 'Table2']].apply(lambda row: review(row.Table1, row.Table2)[1], axis = 1)
if export:
df.to_csv(os.path.join(path, str(year)+'repeatedRTPID.csv'), index=False)
return df[df.Review == 'yes']
# review the repeatedly-used ID in the different spreadsheets
# modified version of the function combineTables
def reviewRepeatedIDs(year=2040, excludeTransit = True):
if year == 2040:
table='2040 Project List_Consolidated draft with AQ (ORIGINAL).xlsx'
else:
table='Working DRAFT 2045 Project List.xlsx'
xl = pd.ExcelFile(table)
sheetNames = xl.sheet_names
sheetNames = [sheetnm for sheetnm in sheetNames if sheetnm != 'Table Data']
if excludeTransit:
sheetNames = [sheetnm for sheetnm in sheetNames if 'Transit' not in sheetnm]
for sheetName in sheetNames:
if sheetName == sheetNames[0]:
df = modifyRTP(readTable(sheetName=sheetName, year=year))
RTPlist = list(df.RTP.unique())
allrepeatedIDs = []
alllistIDs = []
else:
ndf = modifyRTP(readTable(sheetName=sheetName, year=year))
if ndf.shape[0] == 0:
pass
else:
nRTPlist = list(ndf.RTP.unique())
repeatedIDs = [ID for ID in nRTPlist if ID in RTPlist]
if len(repeatedIDs) > 0:
tables = sheetNames[0:sheetNames.index(sheetName)]
for table in tables:
for ID in repeatedIDs:
odf = modifyRTP(readTable(sheetName=table, year=year))
oRTPlist = list(odf.RTP.unique())
if ID in oRTPlist:
listIDs = [ID, sheetName, table]
alllistIDs.append(listIDs)
print("ID {0}: {1}, {2}".format(ID, sheetName, table))
RTPlist.extend(nRTPlist)
allrepeatedIDs.extend(repeatedIDs)
df = pd.DataFrame(alllistIDs, columns=['RTP_ID', 'Table1', 'Table2'])
df.to_csv(os.path.join(path, str(year)+'repeatedRTPID.csv'), index=False)
return RTPlist, allrepeatedIDs, df
def review(string1, string2):
if string1.split("-")[0] == string2.split("-")[0]:
res = "yes"
else:
res = "no"
return res, string1.split("-")[0]
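# Hedged usage sketch (illustrative): review() flags two sheet names as related when
# the text before the first '-' matches.
def _review_example():
    # returns ('yes', 'Auto Constrained ') for two sheets of the same category
    return review('Auto Constrained - Arterial Lin', 'Auto Constrained - Study')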
# for the shapefile column name length limit 10
def shortenColnames(df):
df.rename(columns={'AirQualityStatus': 'AirQuaSta',
'Description': 'Narration',
'EstimatedCost': 'RoughCost',
'EstimatedYearofConstruction': 'YearRange',
'FunctionalClass': 'FunctClass',
'GeographicLimits': 'GeoLimits',
'JurisdictionalProject#': 'JurisProjN',
'PrimaryJurisdiction': 'PrimJurisd',
'YearofConstructionCostMax': 'CostMax',
'YearofConstructionCostMin': 'CostMin',
'RTP': 'RTP_ID'}, inplace = True)
# get combined tables in 2045
def getCombinedTables(cat='added', export=False, byCategory=False, category='AutoConstrained'):
if cat == 'added':
filePaths = glob.glob(os.path.join(path, '*45.csv'))
# the last file is project_2045.csv
filePaths.pop()
data = combineProjectReviewTable(filePaths)
data.columns = data.columns.str.replace('45', '')
elif cat == 'common':
if byCategory:
filePaths = glob.glob(os.path.join(path, category+'*.csv'))
exclude = re.compile(r'.*[0-9]{2}.csv')
filePaths = [f for f in filePaths if not exclude.match(f)]
else:
allfilePaths = glob.glob(os.path.join(path, '*.csv'))
filePaths = [os.path.join(path, sheetName.replace(' ', '') + '.csv') for sheetName in sheetList if os.path.join(path, sheetName.replace(' ', '') + '.csv') in allfilePaths]
df = combineProjectReviewTable(filePaths)
sel = df.columns.map(lambda x: bool(re.search('45',x)))
cols = list(df.columns[sel].values)
cols.append('RTP')
data = df[cols]
# another way to select columns with certain string pattern:
# df[df.columns[df.columns.to_series().str.contains('45')]].head()
data.columns = data.columns.str.replace('45', '')
elif cat == 'missing':
filePaths = glob.glob(os.path.join(path, '*40.csv'))
# the last file is project_2045.csv
filePaths.pop()
data = combineProjectReviewTable(filePaths)
data.columns = data.columns.str.replace('40', '')
if export:
if byCategory:
data.to_csv(os.path.join(path, cat + category + 'Projects.csv'), index=False)
else:
data.to_csv(os.path.join(path, cat + 'Projects.csv'), index=False)
if cat == 'common':
return df, data
else:
return data
# combine tables from the project review step
def combineProjectReviewTable(filePaths):
for filePath in filePaths:
if filePath == filePaths[0]:
df = pd.read_csv(filePath)
df['In'] = np.repeat(filePath.split('\\')[-1].split('-')[0], df.shape[0])
else:
ndf = pd.read_csv(filePath)
ndf['In'] = np.repeat(filePath.split('\\')[-1].split('-')[0], ndf.shape[0])
if 'RTP' in ndf.columns:
selectedColumns = [a for a in list(ndf.columns) if a in list(df.columns)]
ndf = ndf[selectedColumns]
df = df[selectedColumns]
df = df.append(ndf, ignore_index=True)
return df
# review RTP projects in all spreadsheets in a loop
def projectReviebyTable(sheetNames):
outpath = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\ProjectReview'
sheetComplete=[]
sheetEmpty=[]
sheet2Review=[]
for sheetName in sheetNames:
res = checkDiffbyTable(sheetName=sheetName, export=True)
if isinstance(res, pd.DataFrame):
sheet2Review.append(sheetName)
elif res == 0:
sheetEmpty.append(sheetName)
else:
sheetComplete.append(sheetName)
with open(os.path.join(outpath, "review_by_table.txt"), 'a') as f:
print("\n", file=f)
print("Need to review these tables:", file=f)
print(sheet2Review, file=f)
print("\n", file=f)
print("These tables are empty:", file=f)
print(sheetEmpty, file=f)
print("\n", file=f)
print("These tables are complete:", file=f)
print(sheetComplete, file=f)
# review RTP project in each spreadsheet between the tables 2040 and 2045
def checkDiffbyTable(sheetName='Auto Constrained - Arterial Lin', export=False):
outpath = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\ProjectReview'
df40r = readTable(sheetName=sheetName)
df45r = readTable(sheetName=sheetName, year=2045)
file = os.path.join(outpath, "review_by_table.txt")
with open(file, 'a') as f:
print("\n", file=f)
print(sheetName, file=f)
print("Dimension in 2040 data:", file=f)
print(df40r.shape, file=f)
print("Dimension in 2045 data:", file=f)
print(df45r.shape, file=f)
if df40r.shape[0] != 0 and df45r.shape[0] != 0:
keepcols = [col for col in df40r.columns if col in df45r.columns]
df40 = modifyRTP(df40r[keepcols])
df45 = modifyRTP(df45r[keepcols])
RTPex40 = len(df40r.RTP.unique()) - len(df40.RTP.unique())
RTPex45 = len(df45r.RTP.unique()) - len(df45.RTP.unique())
print("In 2040, there is {0} RTP in {1} items excluded in the match; In 2045, there is {2} RTP in {3} items excluded in the match.".format(RTPex40, df40r.shape[0] - df40.shape[0], RTPex45, df45r.shape[0] - df45.shape[0]), file=f)
df40.columns = df40.columns + '40'
df40.rename(columns={"RTP40": "RTP"}, inplace = True)
df45.columns = df45.columns + '45'
df45.rename(columns={"RTP45": "RTP"}, inplace = True)
df40.drop_duplicates(subset='RTP', keep=False, inplace=True)
df45.drop_duplicates(subset='RTP', keep=False, inplace=True)
data = df40.merge(df45, on='RTP')
keepcols.remove('RTP')
if data.shape[0] != 0:
for col in keepcols:
data[col+'Diff'] = data[[col+'40', col+'45']].apply(lambda row: compareDiff(row[col+'40'], row[col+'45']), axis = 1)
data = data.reindex(sorted(data.columns), axis=1)
if export:
outname = sheetName.replace(' ', '')
data.to_csv(os.path.join(outpath, outname + '.csv'), index=False)
if 'GeographicLimitsDiff' in data.columns:
print("Changes in geographic limits:", file=f)
print(data['GeographicLimitsDiff'].value_counts(), file=f)
if 'LengthDiff' in data.columns:
print("Changes in length:", file=f)
print(data['LengthDiff'].value_counts(), file=f)
print("Data include:", file=f)
print(data.columns, file=f)
print(data[['RTP', 'Name40', 'Name45']].head(), file=f)
if data.shape[0] == df40r.shape[0] == df45r.shape[0]:
res = 'ALL'
return data, res
else:
if len([a for a in list(df40.RTP.unique()) if a not in list(data.RTP.unique())]) > 0:
df40[~df40.RTP.isin(list(data.RTP.unique()))].to_csv(os.path.join(outpath, outname + '40.csv'), index=False)
if len([a for a in list(df45.RTP.unique()) if a not in list(data.RTP.unique())]) > 0:
df45[~df45.RTP.isin(list(data.RTP.unique()))].to_csv(os.path.join(outpath, outname + '45.csv'), index=False)
return data
else:
print("At least one of the two tables is empty or there is no match!", file=f)
return 0
# review the differences in multiple columns before and after changes over years with all spreadsheets included
def checkDiff(export=True, excludeTransit = False, by="ID"):
outpath = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\ProjectReview'
if excludeTransit:
df = modifyRTP(combineTables(excludeTransit = True))
df45 = modifyRTP(combineTables(year=2045, excludeTransit = True))
if export:
df.to_csv(os.path.join(outpath, 'project_2040_excludeTransit.csv'), index=False)
df45.to_csv(os.path.join(outpath, 'project_2045_excludeTransit.csv'), index=False)
else:
df = modifyRTP(combineTables())
df45 = modifyRTP(combineTables(year=2045))
if export:
df.to_csv(os.path.join(outpath, 'project_2040.csv'), index=False)
df45.to_csv(os.path.join(outpath, 'project_2045.csv'), index=False)
if by == "ID":
df['ID'] = df[['Name', 'GeographicLimits', 'Description', 'EstimatedYearofConstruction']].apply(lambda row: str(row.Name) + str(row.GeographicLimits) + str(row.Description) + str(row.EstimatedYearofConstruction), axis=1)
df45['ID'] = df45[['Name', 'GeographicLimits', 'Description', 'EstimatedYearofConstruction']].apply(lambda row: str(row.Name) + str(row.GeographicLimits) + str(row.Description) + str(row.EstimatedYearofConstruction), axis=1)
cols = [col for col in list(df.columns) if (col in list(df45.columns)) and (col != 'ID')]
df.columns = df.columns + '40'
df.rename(columns={"ID40": "ID"}, inplace = True)
df45.columns = df45.columns + '45'
df45.rename(columns={"ID45": "ID"}, inplace = True)
data = df.merge(df45, on='ID')
data = data.drop(['ID'], axis=1)
for col in cols:
data[col+'Diff'] = data[[col+'40', col+'45']].apply(lambda row: compareDiff(row[col+'40'], row[col+'45']), axis = 1)
if export:
data.to_csv(os.path.join(outpath, 'project_review.csv'), index=False)
else:
df = df[df.columns.drop(["EstimatedCost", "YearofConstructionCostMin", "YearofConstructionCostMax"])]
df45 = df45[df45.columns.drop(["EstimatedCost", "YearofConstructionCostMin", "YearofConstructionCostMax"])]
cols = [col for col in list(df.columns) if (col in list(df45.columns)) and (col != 'RTP')]
df.columns = df.columns + '40'
df.rename(columns={"RTP40": "RTP"}, inplace = True)
df45.columns = df45.columns + '45'
df45.rename(columns={"RTP45": "RTP"}, inplace = True)
df.drop_duplicates(subset='RTP', keep=False, inplace=True)
df45.drop_duplicates(subset='RTP', keep=False, inplace=True)
data = df.merge(df45, on='RTP')
for col in cols:
data[col+'Diff'] = data[[col+'40', col+'45']].apply(lambda row: compareDiff(row[col+'40'], row[col+'45']), axis = 1)
if export:
data.to_csv(os.path.join(outpath, 'project_review_RTP.csv'), index=False)
return data
# compare the values before and after changes over years
def compareDiff(a, b):
if type(a) is float and type(b) is float:
res = b - a
elif a != b:
res = 999
else:
res = 0
return res
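# Hedged usage sketch (illustrative): compareDiff returns the numeric change for two
# floats, 999 for differing non-numeric values, and 0 when the values are equal.
def _compareDiff_example():
    assert compareDiff(1.5, 2.0) == 0.5
    assert compareDiff('a', 'b') == 999
    assert compareDiff('a', 'a') == 0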
# clean RTP format
def modifyRTP(df):
rtplist = df.RTP.unique()
strlist = [item for item in rtplist if type(item) is str]
df = df[~df.RTP.isin(strlist)]
df = df[~df.RTP.astype(float).isna()]
df.RTP = df.RTP.astype(int)
return df
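# Hedged usage sketch (illustrative): modifyRTP drops string and missing RTP values
# and casts the remaining IDs to int. Toy frame with made-up values:
def _modifyRTP_example():
    df = pd.DataFrame({'RTP': [101, '101a', np.nan, 202.0]})
    # keeps RTP 101 and 202 as integers
    return modifyRTP(df)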
# combine spreadsheets in one dataframe
# edit parts of the spreadsheet to have consistent formats
def combineTables(year=2040, excludeTransit = False):
if year == 2040:
table='2040 Project List_Consolidated draft with AQ (ORIGINAL).xlsx'
else:
table='Working DRAFT 2045 Project List.xlsx'
xl = pd.ExcelFile(table)
sheetNames = xl.sheet_names
sheetNames = [sheetnm for sheetnm in sheetNames if sheetnm != 'Table Data']
if excludeTransit:
sheetNames = [sheetnm for sheetnm in sheetNames if 'Transit' not in sheetnm]
for sheetName in sheetNames:
#print(sheetName)
if sheetName == sheetNames[0]:
df = readTable(sheetName=sheetName, year=year)
else:
ndf = readTable(sheetName=sheetName, year=year)
if ndf.shape[0] == 0:
pass
else:
selectedColumns = [a for a in list(ndf.columns) if a in list(df.columns)]
ndf = ndf[selectedColumns]
df = df[selectedColumns]
df = df.append(ndf, ignore_index=True)
return df
# read each spreadsheet and convert it to dataframe
def readTable(sheetName='Auto Constrained - Arterial Lin',
year=2040):
if year == 2040:
table='2040 Project List_Consolidated draft with AQ (ORIGINAL).xlsx'
else:
table='Working DRAFT 2045 Project List.xlsx'
xl = pd.ExcelFile(table)
if sheetName == 'Transit Constrained':
if year == 2040:
df1 = xl.parse(sheetName, skiprows=3, nrows=6)
df1 = addCategory(df1)
columns = df1.columns
df2 = xl.parse(sheetName, skiprows=10, nrows=9)
df2.columns = list(columns[0:(len(columns)-1)])
df2 = addCategory(df2)
df3 = xl.parse(sheetName, skiprows=20, nrows=6)
df3.columns = list(columns[0:(len(columns)-1)])
df3 = addCategory(df3)
else:
df1 = xl.parse(sheetName, nrows=5)
df1 = addCategory(df1)
columns = df1.columns
df2 = xl.parse(sheetName, skiprows=7, nrows=9)
df2.columns = list(columns[0:(len(columns)-1)])
df2 = addCategory(df2)
df3 = xl.parse(sheetName, skiprows=17, nrows=6)
df3.columns = list(columns[0:(len(columns)-1)])
df3 = addCategory(df3)
df = pd.concat([df1, df2, df3], ignore_index=True)
df.rename(columns={"Unnamed: 8": "Year of Construction Cost Max"}, inplace=True)
else:
df = xl.parse(sheetName)
if len([col for col in df.columns if 'Unnamed' in col]) > 5:
df = xl.parse(sheetName, skiprows=3)
if year == 2045:
if sheetName in ['Bike Illustrative - withRd', 'Bike Illustrative - onstreet w',
'Bike Illustrative - onstreet w', 'Bike Illustrative onstreet wout']:
df.rename(columns={"Unnamed: 7": "Year of Construction Cost Max"}, inplace=True)
else:
df.rename(columns={"Unnamed: 8": "Year of Construction Cost Max"}, inplace=True)
else:
if sheetName in ['Auto Constrained - Study', 'Bike Constrained - wRd',
'Bike Constrained - onstreet w', 'Bike Constrained - onstreet wou',
'Bike Illustrative - woutRD', 'Bike Illustrative - onstreet w',
'Bike Illustrative onstreet wout', 'Transit Illustrative']:
df.rename(columns={"Unnamed: 8": "Year of Construction Cost Max"}, inplace=True)
else:
df.rename(columns={"Unnamed: 9": "Year of Construction Cost Max"}, inplace=True)
if df.shape[0] == 0:
pass
else:
df = addCategory(df)
if year == 2040:
df.rename(columns={"Year of Construction\nCost Range": "Year of Construction Cost Min"}, inplace=True)
else:
df.rename(columns={"Year of Construction Range": "Year of Construction Cost Min",
"Year of Construction\nCost Range": "Year of Construction Cost Min",
"Year of Construction \nCost Range": "Year of Construction Cost Min"}, inplace=True)
df = df[df.columns.drop(list(df.filter(regex='Unnamed')))]
df.columns = df.columns.str.replace(' ', '')
df.columns = df.columns.str.replace('\n', '')
if 'EstimatedYearofStudy(4-YearWindow)' in list(df.columns):
df.rename(columns={"EstimatedYearofStudy(4-YearWindow)": "EstimatedYearofConstruction(4-YearWindow)"},
inplace=True)
df.rename(columns={"RTP#": "RTP", "EstimatedYearofConstruction(4-YearWindow)":"EstimatedYearofConstruction"}, inplace=True)
df.rename(columns={"GeogrpahicLimits": "GeographicLimits"}, inplace=True)
df.rename(columns={'EstimatedCost(2016)': 'EstimatedCost',
'EstimatedCost(2020)': 'EstimatedCost',
'EstimatedCost(2021)': 'EstimatedCost',
'EstimatedCost(20XX)': 'EstimatedCost'}, inplace=True)
df['In'] = np.repeat(sheetName.split('-')[0], df.shape[0])
return df
# add category to the dataframe
def addCategory(df):
name = df['Name'][0].split(":")[1].lstrip()
df = df.drop(labels=0, axis=0)
df = df[df.Name.astype(str) != 'nan']
#df = df[df.columns.drop(list(df.filter(regex='Unnamed')))]
s = pd.Series([name])
df['Category'] = list(s.repeat(df.shape[0]))
return df
# get targeted layers to modify GIS data
def targetLayers(patterns = ['Constrained_Roadway', 'Illustrative_Roadway', 'Constrained_BikePed', 'Illustrative_BikePed']):
targetLayers = []
for pattern in patterns:
layers = [item for item in getLayernames(pattern=pattern) if 'P1' not in item]
targetLayers += layers
return targetLayers
# match IDs between the table and GIS data
def matchID(rtpid_table, rtpid_layer):
newRTPid = [a for a in rtpid_table if a not in rtpid_layer]
missedRTPid = [a for a in rtpid_layer if a not in rtpid_table]
commonid = [a for a in rtpid_table if a in rtpid_layer]
return newRTPid, missedRTPid, commonid
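# Hedged usage sketch (illustrative): matchID splits two ID collections into
# table-only IDs, layer-only IDs, and shared IDs.
def _matchID_example():
    # returns ([1], [4], [2, 3])
    return matchID([1, 2, 3], [2, 3, 4])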
# get RTP IDs from both spreadsheets and layers
def getIDs(excel='2040 Project List_Consolidated draft with AQ (ORIGINAL).xlsx',#Working DRAFT 2045 Project List.xlsx
Tablepattern='Auto Constrained',
Layerpattern='Constrained_Roadway'):
sheetNames = getSheetnames(excel=excel, pattern=Tablepattern)
rtpid_table = []
for sheetName in sheetNames:
#print(sheetName)
xl = pd.ExcelFile(excel)
df = xl.parse(sheetName)
if df.shape[0] != 0:
l = getRTPid(excel=excel, sheet_name=sheetName)[1]
rtpid_table += l
layers = [item for item in getLayernames(pattern=Layerpattern) if 'P1' not in item]
rtpid_layer = []
for layer in layers:
#print(layer)
l = LayerRTPid(layer = layer)
rtpid_layer += l
return rtpid_table, rtpid_layer
# get RTP ID from the layers
def LayerRTPid(gdb_file = r'T:\MPO\RTP\FY16 2040 Update\Data\RTP_2040_Data.gdb',
layer='Constrained_Roadway_lines_P1'):
gpdfile = gpd.read_file(gdb_file, layer=layer)
a = gpdfile.RTP_ID.unique()
return a[~np.isnan(a)].astype(int).tolist()
# get layer names
def getLayernames(gdb_file = r'T:\MPO\RTP\FY16 2040 Update\Data\RTP_2040_Data.gdb',
pattern='Constrained_Roadway'):
layers = fiona.listlayers(gdb_file)
return [layer for layer in layers if re.match(pattern, layer)]
# get RTP ID from the table
def getRTPid(excel='Working DRAFT 2045 Project List.xlsx',
sheet_name='Auto Constrained - Arterial Lin'):
    xl = pd.ExcelFile(excel)
#description: program uses dual moving average crossover to determine when to buy and sell stock
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
AAPL = pd.read_csv('StockTickers/SUNW.csv',nrows=200)
SMA30 = pd.DataFrame()
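# Hedged sketch (illustrative continuation, not the original script): compute 30- and
# 100-day simple moving averages and a crossover signal. The 'Adj Close' column name
# and the 100-day window are assumptions.
SMA30['Adj Close'] = AAPL['Adj Close'].rolling(window=30).mean()
SMA100 = pd.DataFrame()
SMA100['Adj Close'] = AAPL['Adj Close'].rolling(window=100).mean()
signal = np.where(SMA30['Adj Close'] > SMA100['Adj Close'], 1, 0)  # 1 = short MA above long MA
crossover = pd.Series(signal).diff()  # +1 marks a buy crossover, -1 a sell crossover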
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
    exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
                 Timestamp('2000-02-29', tz='US/Central')], name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
import glob
import logging
import os
from datetime import datetime
import numpy
import pandas
import pandas as pd
import prometheus_pandas.query
from app.algorithms.copod import COPODAlgorithm
from app.algorithms.loda import LODAAlgorithm
from app.algorithms.lmdd import LMDDAlgorithm
from app.algorithms.autoencoder import AutoEncoderAlgorithm
from app.algorithms.knn import KNNAlgorithm
from app.algorithms.cblof import CBLOFAlgorithm
from app.algorithms.hbos import HBOSAlgorithm
from app.algorithms.iforest import IForestAlgorithm
from app.dependencies.config_dependencies import connect_to_prometheus_instances, load_config
from app.dependencies.database_dependencies import load_session
from fastapi import BackgroundTasks, Depends, APIRouter
from matplotlib import pyplot as plt
# from prometheus_client import Counter
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sqlalchemy.orm import session
from app.utils.graph_builder import build_comparison_graphs
from app.utils.kmeans_utils import get_distance_by_point
from app.utils.performance_analysis import persist_evaluation, analyze_performance, prepare_and_store_dataframe
router = APIRouter()
DT_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
DT_FORMAT_ALTERNATIVE = '%Y-%m-%d %H:%M:%S'
microsoft_datasets = ["ms_ecommerce-api-incoming-rps", "ms_mongodb-application-rpsv",
"ms_middle-tier-api-dependency-latency-uv", "ms_middle-tier-api-dependency-latency-mv",
"ms_data-ingress-rate-01", "ms_data-ingress-rate-04", "ms_data-ingress-rate-05"]
microsoft_datasets_other_dt_format = ["ms_data-app-crash-rate2-01", "ms_data-app-crash-rate2-02",
"ms_data-app-crash-rate2-03", "ms_data-app-crash-rate2-04",
"ms_data-app-crash-rate2-05", "ms_data-app-crash-rate2-06",
"ms_data-app-crash-rate2-07", "ms_data-app-crash-rate2-08",
"ms_data-app-crash-rate2-09", "ms_data-app-crash-rate2-10",
"ms_data-app-crash-rate1-01", "ms_data-app-crash-rate1-02",
"ms_data-app-crash-rate1-03", "ms_data-app-crash-rate1-04",
"ms_data-app-crash-rate1-05", "ms_data-app-crash-rate1-06",
"ms_data-app-crash-rate1-07", "ms_data-app-crash-rate1-08",
"ms_data-app-crash-rate1-09"]
commercial_datasets = ["reg1-app18.csv", "reg0-app10.csv", "reg1-app0.csv", "reg1-app1.csv", "reg1-app2.csv",
"reg1-app3.csv", "reg1-app4.csv", "reg1-app5.csv", "reg1-app6.csv", "reg1-app7.csv",
"reg1-app8.csv", "reg1-app9.csv", "reg1-app10.csv", "reg1-app11.csv", "reg1-app12.csv",
"reg1-app13.csv", "reg1-app14.csv", "reg1-app15.csv", "reg1-app16.csv", "reg1-app17.csv",
"reg2-app1.csv", "reg2-app3.csv", "reg2-app4.csv", "reg2-app5.csv", "reg2-app8.csv",
"reg2-app10.csv", "reg2-app11.csv", "reg2-app17.csv", "reg2-app18.csv"]
nab_datasets = ["art_daily_no_jump", "art_daily_flatmiddle", "ec2_request_latency_system_failure"]
# Extension of Datasets: Add the dataset identifier her
additional_datasets = []
microsoft_unlabeled_datasets = ["ms_middle-tier-api-dependency-latency-mv"]
@router.get("/start_evaluation")
async def start_evaluation(background_tasks: BackgroundTasks,
prom_con: dict = Depends(
connect_to_prometheus_instances),
db_session: session = Depends(load_session),
conf: dict = Depends(load_config)):
"""Handles the HTTP Route for /start_evaluation and delegates the function to the background queue
:param background_tasks: The built-in background task queue from FastAPI
:param prom_con: Dependence to the Prometheus connection objects
:param db_session: Dependence to the database session object
:param conf: Dependence to the config map
:return: Returns text that the job was started as HTTP Response
"""
background_tasks.add_task(evaluate_all, prom_con, db_session, conf)
return "Started job: evaluation of all algorithms"
def evaluate_all(prom_con: dict, db_session: session, conf: dict):
"""Evaluates all configured anomaly detection frameworks and algorithms with the configured datasets on their
performance and stores the results in the database
:param prom_con: Prometheus connection objects
:param db_session: Database session object
:param conf: Config map
"""
# c.inc()
datasets = conf["evaluation"]["datasets"]
evaluation_result = {}
"""Iterate over all datasets configured in config_map"""
for dataset in datasets:
dataset_id = dataset["id"]
local_path = dataset["local_path"]
dataset_labeled = dataset["labeled"]
unsupervised = dataset["unsupervised"]
ts_type = dataset["ts_type"]
logging.info("Starting evaluation of dataset {0}".format(dataset_id))
        # Supervised training requires labels; skip unlabeled datasets configured for supervised mode
if not unsupervised and not dataset_labeled:
continue
"""Load dataset"""
pd_df = None
if local_path is not None and local_path != "":
if dataset_id in microsoft_datasets or dataset_id in microsoft_datasets_other_dt_format:
pd_df = dask_workaround_read_single_csv_dir(local_path)
pd_df.rename(columns={"TimeStamp": "timestamp", "Value": "value", "Label": "label"}, inplace=True)
elif dataset_id in nab_datasets:
pd_df = dask_workaround_read_single_csv_dir(local_path)
# pd_df.rename(columns={"TimeStamp": "timestamp", "Value": "value"}, inplace=True)
elif dataset_id in commercial_datasets:
pd_df = dask_workaround_read_single_csv_dir(local_path)
elif dataset_id in additional_datasets:
# Extension of Datasets: add the dataset loading and the transformation of the datasets here
# Extension of Datasets: remove the break statement
break
else:
logging.info("Dataset not found - skip this dataset")
break
"""Train the dataset with every specified algorithms"""
for algorithm in dataset["algorithms"]:
algo_id = algorithm["id"]
train_percentage = algorithm["train_percentage"]
identity = "evaluation_" + algo_id + "_with_dataset_" + dataset_id
df_length = len(pd_df.index)
df = pd_df.copy(deep=True)
if dataset_id in microsoft_datasets:
"""Convert string to unix timestamp"""
df["timestamp"] = df["timestamp"].apply(lambda x: datetime.strptime(x, DT_FORMAT).timestamp())
elif dataset_id in microsoft_datasets_other_dt_format:
"""Convert string to unix timestamp"""
df["timestamp"] = df["timestamp"].apply(lambda x: datetime.strptime(x, DT_FORMAT_ALTERNATIVE) \
.timestamp())
elif dataset_id in nab_datasets:
df["timestamp"] = df["timestamp"].apply(lambda x: datetime.strptime(x, DT_FORMAT_ALTERNATIVE).timestamp())
elif dataset_id in commercial_datasets:
df["timestamp"] = df["timestamp"].apply(lambda x: datetime.strptime(x, DT_FORMAT_ALTERNATIVE).timestamp())
elif dataset_id in additional_datasets:
# Extension of Datasets: add the dataset loading and the transformation of the dataset that is needed
# for the algorithm here
# Extension of Datasets: remove the break statement
break
else:
logging.info("Dataset not found - skip this dataset")
break
df.set_index("timestamp", inplace=True)
"""Split into train and test set according to config"""
train_data_length = int(train_percentage * df_length)
test_data_length = int(df_length - train_data_length)
train_df = df.iloc[0:train_data_length]
test_df = df.iloc[train_data_length:train_data_length + test_data_length]
"""Cache test_df for later comparison"""
train_value_counts = None
test_value_counts = None
contamination_train = 0.1
contamination_test = 0.1
test_df_cache = test_df.copy(deep=True)
"""Training phase"""
clf = None
success = False
if dataset_labeled and "contamination_train" not in algorithm:
train_value_counts = train_df["label"].value_counts()
test_value_counts = test_df["label"].value_counts()
if 0 not in train_value_counts:
s = pd.Series(data={0: 0})
train_value_counts = train_value_counts.append(s)
if 1 not in train_value_counts:
s = pd.Series(data={1: 0})
train_value_counts = train_value_counts.append(s)
if 0 not in test_value_counts:
s = pd.Series(data={0: 0})
                    test_value_counts = test_value_counts.append(s)
if 1 not in test_value_counts:
s = pd.Series(data={1: 0})
                    test_value_counts = test_value_counts.append(s)
contamination_train = train_value_counts.get(1).item() \
/ (train_value_counts.get(0).item() + train_value_counts.get(1).item())
# contamination_test = test_value_counts.get(1).item() \
# / (test_value_counts.get(0).item() + test_value_counts.get(1).item())
else:
contamination_train = algorithm["contamination_train"]
# contamination_test = contamination_train
# Contamination Train can't be null, so it is set to a very small number, but training should not be done
# on data without anomalies
if contamination_train == 0.0:
contamination_train = 0.00000001
# Remove for training unsupervised
if dataset_labeled and unsupervised:
test_df = test_df.drop(columns="label")
train_df = train_df.drop(columns="label")
elif dataset_labeled and not unsupervised:
test_df["label"] = 0
logging.info("Starting training phase of {0}".format(algo_id))
# Extension of Algorithms: Add the algorithm identifier (can be freely chosen, but must be unique) here and
# the corresponding algorithm class and training methods (that was added to the `algorithms` folder)
if algo_id == "iforest":
"""iForest Training"""
if unsupervised:
clf = IForestAlgorithm(contamination_train, ts_type, 1, 2)
clf.train_algorithm_unsupervised(train_df)
else:
clf = IForestAlgorithm(contamination_train, ts_type, 2, 2)
clf.train_algorithm_supervised(train_df)
elif algo_id == "cblof":
if unsupervised:
clf = CBLOFAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = CBLOFAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
elif algo_id == "hbos":
if unsupervised:
clf = HBOSAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = HBOSAlgorithm(contamination_train)
clf.train_algorithm_supervised(train_df)
elif algo_id == "knn":
if unsupervised:
clf = KNNAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = KNNAlgorithm(contamination_train)
clf.train_algorithm_supervised(train_df)
elif algo_id == "oneclasssvm":
if unsupervised:
clf = OneClassSVMAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = OneClassSVMAlgorithm(contamination_train)
clf.train_algorithm_supervised(train_df)
elif algo_id == "lmdd":
if unsupervised:
clf = LMDDAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = LMDDAlgorithm(contamination_train)
clf.train_algorithm_supervised(train_df)
elif algo_id == "copod":
if unsupervised:
clf = COPODAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = COPODAlgorithm(contamination_train)
clf.train_algorithm_supervised(train_df)
elif algo_id == "loda":
if unsupervised:
clf = LODAAlgorithm(contamination_train)
clf.train_algorithm_unsupervised(train_df)
else:
clf = LODAAlgorithm(contamination_train)
clf.train_algorithm_supervised(train_df)
current_datetime = datetime.now().strftime(DT_FORMAT)
if clf is not None:
success = clf.store_model_to_file("temp/" + identity)
            evaluation_result[identity] = success
dataset_univariate = (ts_type == "univariate")
"""Evaluation phase"""
logging.info("Starting evaluation phase of {0}".format(algo_id))
algorithms = ["iforest", "copod", "loda", "cblof", "hbos", "knn", "oneclasssvm", "arima",
"autoencoder_torch", "abod",
"xgbod", "lmdd"]
additional_algorithms = []
if algo_id in algorithms:
prediction, prediction_outlier_scores = clf.predict_sample(test_df, ts_type, unsupervised)
result_df = prepare_and_store_dataframe(test_df_cache, current_datetime, prediction, identity,
conf["evaluation"]["df_output_dir"])
build_comparison_graphs(result_df, current_datetime, identity, train_percentage, contamination_train,
unsupervised, conf["evaluation"]["graph_output_dir"])
eval_1_values, eval_2_values = analyze_performance(result_df, prediction_outlier_scores,
dataset_labeled)
persist_evaluation(db_session, dataset_labeled, current_datetime, dataset_id, local_path, dataset_univariate,
dataset_labeled, contamination_train, unsupervised, algo_id, train_percentage, train_data_length,
test_data_length, eval_1_values, eval_2_values)
elif algo_id in additional_algorithms:
# Extension of Algorithms: Add the evaluation code for your algorithm here
# Extension of Algorithms: remove the break statement
break
elif algo_id == "prophet":
print("lel")
# prediction_df = clf.predict_sample(test_df, ts_type)
# result_df = prepare_and_store_dataframe(prediction_df, current_datetime, prediction_df["anomaly"],
# identity, conf["evaluation"]["df_output_dir"])
# result_df.rename(columns={"fact": "value", "anomaly": "label"}, inplace=True)
# build_comparison_graphs(result_df, current_datetime, identity,
# conf["evaluation"]["graph_output_dir"])
# eval_1_values, eval_2_values = analyze_performance(result_df, None)
# persist_evaluation(db_session, current_datetime, dataset_id, local_path, dataset_univariate, dataset_labeled, contamination_train, algo_id, train_percentage, train_data_length,
# test_data_length, eval_1_values, eval_2_values)
else:
logging.info("Failed to evalute with algorithm {0}".format(algo_id))
"""Remove temporary evaluation model files"""
try:
os.remove("temp/" + identity + ".joblib")
except FileNotFoundError as e:
logging.info(e)
logging.info("Failed to delete temporary file {0}, model could not be found".format("temp/" + identity
+ "joblib"))
logging.info("Finished job: evaluation of all algorithms")
return
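# Illustrative shape of the config map consumed by evaluate_all above. The keys mirror
# the lookups performed in the function (datasets, id, local_path, labeled, unsupervised,
# ts_type, algorithms, train_percentage, contamination_train, df_output_dir,
# graph_output_dir); the concrete values are assumptions for documentation only and are
# normally produced by load_config.
EXAMPLE_EVALUATION_CONFIG = {
    "evaluation": {
        "df_output_dir": "output/dataframes",
        "graph_output_dir": "output/graphs",
        "datasets": [
            {
                "id": "art_daily_no_jump",
                "local_path": "data/nab/art_daily_no_jump",
                "labeled": True,
                "unsupervised": True,
                "ts_type": "univariate",
                "algorithms": [
                    {"id": "iforest", "train_percentage": 0.7},
                    # contamination_train, when present, overrides the label-derived value
                    {"id": "hbos", "train_percentage": 0.7, "contamination_train": 0.05},
                ],
            }
        ],
    }
}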
def dask_workaround_read_csv_dir(path):
path = path + "/*.csv"
csv_files = glob.glob(path)
dataframes = list()
for f in csv_files:
        df = pd.read_csv(f)  # api: pandas.read_csv
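# A sketch (not the project's original code) of how a CSV-directory reader like the one
# above can be completed: read every *.csv file in the directory with pandas and
# concatenate the frames into a single DataFrame.
import glob
import pandas as pd

def read_csv_dir_sketch(path):
    frames = [pd.read_csv(f) for f in sorted(glob.glob(path + "/*.csv"))]
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()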
from Sql.KLineTable import KLineTable, KLine
from Sql.StockBriefTable import StockBriefTable
from Sql.KLineBuyRecdTable import KLineBuyRecd
from Sql.ProfitRecdTable import ProfitRecdTable
from datetime import datetime
from datetime import timedelta
import pandas as pd
from pandas import DataFrame
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
import talib
import math
def get_k_line_date(k_line):
return k_line.get_date()
class KLineAnalyzer:
scode_list = StockBriefTable.get_stock_id_list()
cur_scode_indx = 0
macd_peroid = 52
def get_cur_scode(self):
return self.scode_list[self.cur_scode_indx]
def analyze_one(self, stock_id, table_id=0):
register_matplotlib_converters()
k_line_list = KLineTable.select_k_line_list(stock_id, table_id)
date_arr = []
average_price_arr = []
average_cost_arr = []
ma_20_arr = []
ma_200_arr = []
close_arr = []
k_line_cnt = len(k_line_list)
for indx in range(0, k_line_cnt):
k_line = k_line_list[indx]
ma_20 = self.get_ma_close(indx, k_line_list, 20)
ma_200 = self.get_ma_close(indx, k_line_list, 200)
cur_date = datetime(k_line.year, k_line.month, k_line.day)
date_arr.append(cur_date)
average_cost_arr.append(k_line.cost)
average_price_arr.append(k_line.price)
ma_20_arr.append(ma_20)
ma_200_arr.append(ma_200)
close_arr.append(k_line.close)
        cost_line_data_frame = DataFrame({
'DatetimeIndex': date_arr,
'average_price': average_price_arr,
'average_cost': average_cost_arr,
'close': close_arr,
'ma_20': ma_20_arr,
'ma_200': ma_200_arr
})
        plt.plot('DatetimeIndex', 'close', data=cost_line_data_frame)
        # plt.plot('DatetimeIndex', 'average_cost', data=cost_line_data_frame)
        plt.plot('DatetimeIndex', 'ma_20', data=cost_line_data_frame)
        # plt.plot('DatetimeIndex', 'ma_200', data=cost_line_data_frame)
plt.legend()
plt.show()
def is_cross(self, indx, k_line_list):
if indx <= 0 or indx >= len(k_line_list):
return False
else:
cur_k_line = k_line_list[indx]
pre_k_line = k_line_list[indx - 1]
return cur_k_line.diff > cur_k_line.dea and pre_k_line.dea > pre_k_line.diff and cur_k_line.diff > pre_k_line.diff
def is_sell_cross(self, indx, k_line_list):
if indx <= 0 or indx >= len(k_line_list):
return True
else:
cur_k_line = k_line_list[indx]
return cur_k_line.diff < cur_k_line.dea
def is_in_up_trend(self, date, k_line_list):
for idx in range(0, len(k_line_list)):
k_line = k_line_list[idx]
start_date = k_line.get_date()
end_date = start_date + timedelta(weeks=4)
if start_date <= date <= end_date:
return k_line.takeover > 180
return False
def is_match(self, indx, k_line_list):
peroid = 20
if indx <= peroid:
return False
cur_k_line = k_line_list[indx]
if self.is_cross(indx, k_line_list) and cur_k_line.diff >= 0.5:
return True
else:
return False
def is_sell_match(self, indx, k_line_list):
return self.is_sell_cross(indx, k_line_list) and False
def get_macd(self, df_close_data, fastperiod=12, slowperiod=26):
diff, dea, bar = talib.MACD(
df_close_data, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=9)
return diff, dea, bar
def get_ma_cost(self, indx, k_line_list, ma):
sum = 0.0
if indx + 1 < ma:
return 0
else:
for i in range(0, ma):
k_line = k_line_list[indx - i]
sum += k_line.cost
return sum / ma
def get_ma_close(self, indx, k_line_list, ma):
sum = 0.0
if indx + 1 < ma:
return 0
else:
for i in range(0, ma):
k_line = k_line_list[indx - i]
sum += k_line.close
return sum / ma
def is_stock_profit(self, profit_recd_list, date):
cur_date = date - timedelta(days=180)
cur_season = 1
has_recd = False
is_profit = True
if cur_date.month <= 3:
cur_season = 1
elif cur_date.month <= 6:
cur_season = 2
elif cur_date.month <= 9:
cur_season = 3
else:
cur_season = 4
for recd in profit_recd_list:
if recd.year < cur_date.year or (recd.year == cur_date.year and recd.season <= cur_season):
has_recd = True
if recd.profit <= 0:
is_profit = False
return is_profit and has_recd
def add_macd_to_k_line_list(self, k_line_list):
close_arr = []
for cur_idx in range(0, len(k_line_list)):
close_arr.append(k_line_list[cur_idx].close)
        df_close = DataFrame({'close': close_arr})  # api: pandas.DataFrame
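# A hedged sketch of how add_macd_to_k_line_list above could be completed with talib.
# The attribute names diff/dea/bar follow the fields read by is_cross and is_sell_cross;
# everything else is an assumption, not the original implementation. Note that the
# leading values returned by talib.MACD are NaN until enough history has accumulated.
import numpy as np
import talib

def add_macd_sketch(k_line_list, fastperiod=12, slowperiod=26, signalperiod=9):
    close = np.asarray([k.close for k in k_line_list], dtype=float)
    diff, dea, bar = talib.MACD(close, fastperiod=fastperiod,
                                slowperiod=slowperiod, signalperiod=signalperiod)
    for idx, k_line in enumerate(k_line_list):
        k_line.diff = diff[idx]
        k_line.dea = dea[idx]
        k_line.bar = bar[idx]
    return k_line_list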
import boto3
import json
import pandas as pd
import numpy as np
import random
import re
import os
from global_variables import API_PARAMETERS_FILE
from global_variables import print_green, print_yellow, print_red
from global_variables import service_dict
from global_variables import default_feature_list
def read_api_calls_file():
with open(API_PARAMETERS_FILE, "r") as log_file:
data = json.load(log_file)
return data
def call_api_with_auto_generated_parameters(api, method, debug=True):
try:
client = boto3.client(api)
except ValueError as e:
print_red(str(e))
api_calls = read_api_calls_file()
# normal api calls
api_methods = json.loads(api_calls[api])
if debug:
print_green("Print list of methods belong to {0}".format(api))
for method in api_methods:
print(method)
response = None
try:
parameters = api_methods[method]
valued_params = {}
for parm in parameters:
valued_params[parm] = "random-value"
callable_api = getattr(client, method)
print_yellow(valued_params)
response = callable_api(**valued_params)
if debug:
print_yellow("Response after calling {0}.{1}:{2}".format(api,method,response))
except ValueError as e:
print_red(str(e))
return response
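# The getattr-based dispatch above is the standard way to invoke a boto3 client method
# whose name is only known at runtime. A minimal, hedged sketch (the service and method
# below are placeholders chosen for illustration, not values used by this project):
import boto3

def call_by_name_sketch(service="s3", method="list_buckets", **params):
    client = boto3.client(service)
    callable_api = getattr(client, method)  # resolve the method dynamically by name
    return callable_api(**params)           # equivalent to client.list_buckets()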
def assign_random_user():
"""
75% user with temporary credential
25% root
"""
    rand = random.randint(0, 3)
if rand >= 1:
user = "AssumedRole"
else:
user = "Root"
return user
def generate_api_calls_based_on_aws_cloudtrail_logs(log_file, number_of_replication, produce_random_sample=False):
"""
NOT TO USE THIS FUNCTION
"""
# read the log file
collected_logs = pd.read_csv(log_file)
columns = collected_logs.columns.tolist()
# first col: method, second api:request parameters
for log in collected_logs:
print(log)
for i in range(number_of_replication):
user = assign_random_user()
# region = assign_random_region()
for feature in columns:
call_api_with_auto_generated_parameters()
class AWSLogAnalyzer(object):
def __init__(self, log_file, log_dir, read_all_file=False):
self.log_file = log_dir + log_file
if read_all_file:
self.log_df = pd.read_csv(log_dir + log_file)
# call encoder once
if not os.path.exists("encoding.py"):
self.encode_event_names()
if not os.path.exists("vocab_file"):
self.fill_vocab_file_for_bert_tokenizer()
def fill_out_logs_encoder_dict(self):
if self.log_df is None:
print("Cannot generate logs encoder; log df is empty")
else:
unique_services = self.get_feature_from_dataset("eventsource")
if len(unique_services) >= 100:
print_red("There are more than a 100 service. YOU MIGHT HAVE A PROBLEM IN ENCODING")
return 0
# a list of services involved in the log file (index of the list shows the encoding)
service_list = []
for index, value in enumerate(unique_services):
if index < 10:
service_dict[value] = str(index).zfill(1)
else:
service_dict[value] = str(index)
with open("../encoding.py", 'w') as encoding_file:
encoding_file.write("service_dict = " + json.dumps(service_dict))
def encode_event_names(self):
"""Fit label encoder
Parameters
----------
y : array-like of shape=(n_samples,)
Target values
Returns
-------
self : LabelEncoder
returns an instance of self
"""
# Get unique values
y = self.get_feature_from_dataset("eventname")
y = np.unique(y)
# Encode labels
encoding = dict()
encoding = {x: i+len(encoding) for i, x in enumerate(set(y) - set(encoding))}
# generate the encoding dict
with open("../encoding.py", 'w') as encoding_file:
encoding_file.write("events_encoding = " + json.dumps(encoding))
encoding_file.write("\n")
# generate the decoding dict
inverse_encoding = {v: k for k, v in encoding.items()}
with open("../encoding.py", 'a') as encoding_file:
encoding_file.write("events_decoding = " + json.dumps(inverse_encoding))
encoding_file.close()
def fill_vocab_file_for_bert_tokenizer(self):
# Get unique values
y = self.get_feature_from_dataset("eventname")
y = np.unique(y)
with open("vocab_file", "w") as vocabs:
for item in y:
vocabs.write(item + "\n")
vocabs.close()
def convert_aws_region_to_int(self, logs_data_frame):
# TODO: complete the list of regions
region_dict = {
"us-east-1": 1,
"us-east-2": 2,
"us-west-1": 3,
"us-west-2": 4,
}
logs_data_frame.replace({"awsregion":region_dict}, inplace=True)
return logs_data_frame
def extract_user_identity_features(self):
self.log_df["principalid"] = self.log_df["useridentity"].apply(lambda x: re.search('principalid=(.+?),', x).group(1))
self.log_df["accountid"] = self.log_df["useridentity"].apply(lambda x: re.search('accountid=(.+?),', x).group(1))
self.log_df["invokedby"] = self.log_df["useridentity"].apply(lambda x: re.search('invokedby=(.+?),', x).group(1))
self.log_df.drop(columns=["useridentity"], inplace=True)
def extract_request_parameters(self, debug=True):
self.log_df.requestparameters.fillna("", inplace=True)
self.log_df["requestparameters"] = self.log_df['requestparameters'].map(lambda x: json.loads(x) if (x) else {})
        req_parameter_col_expanded = pd.json_normalize(self.log_df['requestparameters'])  # api: pandas.json_normalize
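# pandas.json_normalize flattens a column of parsed dicts (such as requestparameters
# above) into one column per key. A self-contained sketch with made-up records:
import pandas as pd

def normalize_request_params_sketch():
    records = [{"bucketName": "logs", "maxKeys": 10}, {"bucketName": "data"}]
    expanded = pd.json_normalize(records)        # columns: bucketName, maxKeys
    base = pd.DataFrame({"eventname": ["ListObjects", "GetObject"]})
    return pd.concat([base, expanded], axis=1)   # join the flat columns back on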
import glob
import os
import pandas as pd
import json
from functools import reduce
from collections import defaultdict
DEBUG = False
"""
Load the airline names, ids, and percent ontime from the lookup table and create the final object
and working dictionary
"""
def load_basic_airline_info():
path = os.path.normpath(os.path.join(os.getcwd(), 'data/assorted/carriers_percent_more.csv'))
this_data = pd.read_csv(path, skipinitialspace=True, low_memory=False)
airlines = []
airlines_dict = {}
for index, row in this_data.iterrows():
airline_info = {}
airline_id = row['OP_UNIQUE_CARRIER']
airline_info['airline_name'] = row['Description']
airline_info['airline_id'] = airline_id
airline_info['airline_percent_ontime_arrival'] = int(row['N/A(PCT_ONTIME_ARR)'])
airlines.append(airline_info)
airlines_dict[airline_id] = {}
airlines_dict[airline_id]['flights'] = 0
airlines_dict[airline_id]['departures_delay'] = 0
airlines_dict[airline_id]['arrival_delay'] = 0
airlines_dict[airline_id]['connecting_airports'] = []
return airlines, airlines_dict
"""
Load the airport names, ids, cities, states, and percent ontime from the lookup table and create
the final object and working dictionary
"""
def load_basic_airport_info():
path = os.path.normpath(os.path.join(os.getcwd(), 'data/assorted/airports.csv'))
this_data = pd.read_csv(path, skipinitialspace=True, low_memory=False)
airports = []
airports_dict = {}
for index, row in this_data.iterrows():
airport_info = {}
full_airport_name = row['Description'] # looks like "Nashville, TN: Nashville International"
airport_split_at_colon = full_airport_name.split(': ')
airport_id = row['ORIGIN']
airport_info['airport_name'] = airport_split_at_colon[1]
airport_info['airport_state'] = airport_split_at_colon[0].split(', ')[1]
airport_info['airport_city'] = airport_split_at_colon[0].split(', ')[0]
airport_info['airport_id'] = airport_id
airport_info['airport_percent_ontime_departure'] = int(row['N/A(PCT_ONTIME_DEP)'])
airports.append(airport_info)
airports_dict[airport_id] = {}
airports_dict[airport_id]['departures'] = 0
airports_dict[airport_id]['departures_delay'] = 0
airports_dict[airport_id]['arrivals'] = 0
airports_dict[airport_id]['taxi_in'] = 0
airports_dict[airport_id]['taxi_out'] = 0
airports_dict[airport_id]['connecting_airports'] = []
airports_dict[airport_id]['connecting_airlines'] = []
return airports, airports_dict
"""
Load a month's data from all the csv files for each year and create a dataframe
"""
def load_month_data(month):
path = os.path.normpath(os.path.join(os.getcwd(), 'data/flight-data/*/*_' + str(month) + '.csv'))
month_data = glob.glob(path)
loaded_data = []
for path in month_data:
temp_data = []
for chunk in pd.read_csv(path, skipinitialspace=True, low_memory=False, chunksize=20000):
temp_data.append(chunk)
this_data = pd.concat(temp_data, axis= 0)
loaded_data.append(this_data)
    df = pd.concat(loaded_data)  # api: pandas.concat
"""
Important cases
1. Base
2. Small sample adjustment
3. Constraints across equations
"""
import numpy as np
import pandas as pd
from linearmodels.tests.system._utility import generate_data
basic_data = generate_data(n=200, k=3, p=[2, 3, 4], const=True, seed=0)
common_data = generate_data(n=200, k=3, p=3, common_exog=True, seed=1)
missing_data = generate_data(n=200, k=3, p=[2, 3, 4], const=True, seed=2)
np.random.seed(1234)
for key in missing_data:
dep = missing_data[key]["dependent"]
locs = np.where(np.random.random_sample(dep.shape[0]) < 0.02)[0]
if np.any(locs):
dep.flat[locs] = np.nan
exog = missing_data[key]["exog"]
locs = np.where(np.random.random_sample(np.prod(exog.shape)) < 0.02)[0]
if np.any(locs):
exog.flat[locs] = np.nan
out = []
for i, dataset in enumerate((basic_data, common_data, missing_data)):
base = "mod_{0}".format(i)
for j, key in enumerate(dataset):
dep = dataset[key]["dependent"]
dep = pd.DataFrame(dep, columns=[base + "_y_{0}".format(j)])
dataset[key]["dependent"] = dep
exog = dataset[key]["exog"][:, 1:]
exog_cols = [base + "_x_{0}{1}".format(j, k) for k in range(exog.shape[1])]
exog = pd.DataFrame(exog, columns=exog_cols)
exog = exog.copy()
exog["cons"] = 1.0
dataset[key]["exog"] = exog
if i != 1 or j == 0:
out.extend([dep, exog])
else:
out.extend([dep])
if __name__ == "__main__":
    df = pd.concat(out, 1, sort=False)  # api: pandas.concat
from sklearn import tree
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from operator import itemgetter
import pandas as pd
import numpy as np
import graphviz
import matplotlib.pyplot as plt
# get data for LOCF and mean preprocessed
df = pd.read_csv("data/shots_LOCF.csv", index_col="shot_num")
# turn categorical into numerical
df["play_pattern"] = | pd.Categorical(df["play_pattern"]) | pandas.Categorical |
from http.client import responses
from statistics import mean
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
    df = pd.read_csv(filename, parse_dates=['Date'])
df.dropna(inplace=True)
df = df[df['Temp']>=-10]
responses = df['Temp']
df['DayOfYear'] = df['Date'].dt.dayofyear
df.drop(['Temp'], axis=1, inplace=True)
return (df,responses)
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
x, y = load_data('/home/ronyzerkavod/IML.HUJI/datasets/City_Temperature.csv')
# Question 2 - Exploring data for specific country
df_curr = pd.concat([x,y], axis=1)
only_israel = df_curr[df_curr['Country']=='Israel']
only_israel['Year'] = only_israel['Year'].astype(str)
px.scatter(only_israel, x="DayOfYear", y="Temp", color="Year", title="Tempreture In Israel").show()
df_Month = df_curr.groupby('Month').Temp.agg(std="std")
px.bar(df_Month, x=df_Month.index, y="std", title="standard deviation of the daily temperatures").show()
# Question 3 - Exploring differences between countries
df_curr = pd.concat([x,y], axis=1)
df_set_month_country = df_curr.groupby(['Month','Country']).Temp.agg(std="std",mean="mean").reset_index()
px.line(df_set_month_country, x="Month", y="mean",error_y="std", color="Country", title="the average monthly temperature and std").show()
# Question 4 - Fitting model for different values of `k`
israel_y = only_israel['Temp']
israel_x = only_israel.drop(['Temp'], axis=1, inplace=False)
train_x_i, train_y_i, test_x_i, test_y_i = split_train_test(israel_x['DayOfYear'],israel_y,0.75)
train_x_i = np.array(train_x_i)
train_y_i = np.array(train_y_i)
test_x_i = np.array(test_x_i)
test_y_i= np.array(test_y_i)
k_values = np.linspace(1, 10, 10).astype(int)
losses= []
for deg in k_values:
pol_fit = PolynomialFitting(deg)
pol_fit.fit(train_x_i,train_y_i)
mse_loss = pol_fit.loss(test_x_i,test_y_i)
losses.append(round(mse_loss,2))
print(losses)
df_k_loss = pd.DataFrame({"k_val":k_values,"loss":losses})
px.bar(df_k_loss, x=k_values, y=losses, title="the test error recorded for each value of k").show()
# Question 5 - Evaluating fitted model on different countries
k = 5
pol_fit_model = PolynomialFitting(5)
pol_fit_model._fit(israel_x["DayOfYear"].to_numpy(),israel_y)
    all_countries = pd.concat([x, y], axis=1)  # api: pandas.concat
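# A sketch of how Question 5 could continue from here: evaluate the k=5 polynomial model
# fitted on the Israel data against every other country and plot the errors. This is an
# assumption about the intended solution, not the official one; it reuses pol_fit_model
# and all_countries defined above and the loss method used earlier.
def evaluate_other_countries_sketch(pol_fit_model, all_countries):
    countries = [c for c in all_countries["Country"].unique() if c != "Israel"]
    losses = []
    for country in countries:
        country_df = all_countries[all_countries["Country"] == country]
        losses.append(round(pol_fit_model.loss(country_df["DayOfYear"].to_numpy(),
                                               country_df["Temp"].to_numpy()), 2))
    px.bar(x=countries, y=losses,
           title="Loss of the Israel-fitted model on other countries").show()
    return losses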
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
import os
import time
def ScoreProcess():
"""
extract student score ranking percentage feature
"""
if os.path.exists('input/processed/score_train_test.csv'):
return
score_train = pd.read_table('input/train/score_train.txt', sep=',', header=None)
score_train.columns = ["stu_id","school_id","grade_rank"]
score_test = pd.read_table('input/test/score_test.txt', sep=',', header=None)
score_test.columns = ["stu_id","school_id","grade_rank"]
score_train_test = pd.concat([score_train,score_test])
#compute total people of every school
school_dict = {}
for sch_id in score_train_test["school_id"].unique():
school_dict[sch_id] = score_train_test[score_train_test["school_id"] == sch_id]["grade_rank"].max()
#compute students' rank rate at his/her school
score_train_test["rank_rate"] = score_train_test.apply(lambda row : row["grade_rank"] / school_dict[row["school_id"]], axis=1)
# save the processed score dataframe in csv format
score_train_test.to_csv("input/processed/score_train_test.csv", index=False)
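# The per-school maximum above can also be obtained without building an explicit dict,
# using a groupby transform. A hedged alternative sketch, equivalent to the apply-based
# version in ScoreProcess:
def rank_rate_by_transform_sketch(score_train_test):
    school_max = score_train_test.groupby("school_id")["grade_rank"].transform("max")
    score_train_test["rank_rate"] = score_train_test["grade_rank"] / school_max
    return score_train_test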
def DormProcess():
"""
extract student's dorm in and out frequency
"""
if os.path.exists('input/processed/dorm_train_test.csv'):
return
dorm_train = pd.read_table('input/train/dorm_train.txt', sep=',', header=None)
dorm_train.columns = ["stu_id","when","direction"]
dorm_test = pd.read_table('input/test/dorm_test.txt', sep=',', header=None)
dorm_test.columns = ["stu_id","when","direction"]
dorm_train_test = pd.concat([dorm_train,dorm_test])
# count the frequency of in and out
cleaned_dorm = pd.DataFrame({"in_count":dorm_train_test.groupby(["stu_id"])["direction"].sum(),
"all_count":dorm_train_test.groupby(["stu_id"])["direction"].count()}).reset_index()
cleaned_dorm["out_count"] = cleaned_dorm["all_count"] - cleaned_dorm["in_count"]
cleaned_dorm.drop("all_count", axis=1, inplace=True)
cleaned_dorm.to_csv("input/processed/dorm_train_test.csv", index=False)
def LibraryProcess():
"""
extract student's library IO and book borrow frequency
"""
if os.path.exists('input/processed/lib_io_count.csv') and os.path.exists('input/processed/borrow_book_count.csv'):
return
# library io data
lib_train = pd.read_table('input/train/library_train.txt', sep=',', header=None)
lib_train.columns = ["stu_id","gate","time"]
lib_test = pd.read_table('input/test/library_test.txt', sep=',', header=None)
lib_test.columns = ["stu_id","gate","time"]
lib_train_test = pd.concat([lib_train,lib_test])
lib_io_count = pd.DataFrame({"lib_io_count": lib_train_test.groupby(["stu_id"])["gate"].count()}).reset_index()
lib_io_count.to_csv("input/processed/lib_io_count.csv", index=False)
# process book borrow data
def bookBorrowProcess(borrow_file):
borrow_list = []
with open(borrow_file, 'r', encoding='utf-8') as file:
content = file.readlines()
for line in content:
borrow_dict = {}
split_line = line.split(',')
borrow_dict["stu_id"] = split_line[0]
borrow_dict["borrow_time"] = split_line[1]
borrow_dict["book_name"] = split_line[2]
if split_line[-1] == split_line[2]:
borrow_dict["book_code"] = np.nan
else:
borrow_dict["book_code"] = split_line[-1].rstrip('\n')
borrow_list.append(borrow_dict)
borrow_list_df = pd.DataFrame(borrow_list)
return borrow_list_df
borrow_train_df = bookBorrowProcess('input/train/borrow_train.txt')
borrow_test_df = bookBorrowProcess('input/test/borrow_test.txt')
borrow_train_test = pd.concat([borrow_train_df, borrow_test_df])
borrow_book_count = pd.DataFrame({"books_borrow_count": borrow_train_test.groupby(["stu_id"])["book_name"].count()}).reset_index()
borrow_book_count.to_csv("input/processed/borrow_book_count.csv", index=False)
def CardProcess():
"""
extract multiple dimensional consume features from card data, including consume places, consume type, consume time.
"""
# some preprocess on card data
card_train = pd.read_table("input/train/card_train.txt", sep=",", header=None)
card_train.columns = ["stu_id", "consume_form", "where", "type", "when", "amount", "residual"]
card_test = pd.read_table("input/test/card_test.txt", sep=",", header=None)
card_test.columns = ["stu_id", "consume_form", "where", "type", "when", "amount", "residual"]
card_train_test = pd.concat([card_train, card_test])
# filter the card data using consume_form column selecting “POS消费”,“nan”,“车载消费”
filter_cond = (card_train_test["consume_form"]=="POS消费") | (card_train_test["consume_form"]=="车载消费") | (card_train_test["consume_form"].isnull())
consume_train_test = card_train_test[filter_cond]
consume_train_test = consume_train_test.drop(["consume_form"],axis=1)
# convert Chinese into english in Series "type" using map function
type_dict = {"食堂": 0, "超市": 1, "图书馆": 2 , "洗衣房": 3, "开水": 4, "淋浴": 5, "文印中心": 6, "教务处": 7, "校车": 8, "校医院": 9, "其他": 10}
consume_train_test["type"] = consume_train_test["type"].map(type_dict)
consume_train_test["type"].fillna(11, inplace=True)
consume_train_test["type"] = consume_train_test["type"].astype(int)
consume_train_test["where"] = consume_train_test["where"].str.extract("(\d+)")
consume_train_test["where"].fillna(-1, inplace=True)
consume_train_test.to_csv("input/processed/consume_train_test.csv", index=False)
# feature 1: group the consume_train_test data by stu_id and compute some features like count, min, max, sum.
consume_by_id = pd.DataFrame({"consume_count": consume_train_test.groupby(['stu_id'])['amount'].count(),
"consume_sum": consume_train_test.groupby(['stu_id'])['amount'].sum(),
"consume_max": consume_train_test.groupby(['stu_id'])['amount'].max(),
"consume_median": consume_train_test.groupby(['stu_id'])['amount'].median()}).reset_index()
consume_by_id["residual_sum"] = consume_train_test.groupby(['stu_id'])['residual'].sum()
consume_by_id["residual_max"] = consume_train_test.groupby(['stu_id'])['residual'].max()
consume_by_id["residual_median"] = consume_train_test.groupby(['stu_id'])['residual'].median()
consume_by_id.to_csv("input/processed/consume_by_id.csv", index=False)
# feature 2: extract some statistic features based on consume type
# sum of different consume type
consume_by_type_sum = consume_train_test.groupby(['stu_id', 'type'])["amount"].sum().unstack().reset_index()
# some types have too many missing values, so drop them and keep the type 0,1,2,3,4,5,6,8
consume_by_type_sum.drop([7, 9, 10, 11], axis=1, inplace=True)
# change the column names which are more indicative
consume_by_type_sum.columns = ['stu_id', 'type_0_sum', 'type_1_sum', 'type_2_sum', 'type_3_sum', 'type_4_sum', 'type_5_sum', 'type_6_sum', 'type_8_sum']
# count of different consume type
consume_by_type_count = consume_train_test.groupby(['stu_id', 'type'])["amount"].count().unstack().reset_index()
consume_by_type_count.drop([7, 9, 10, 11], axis=1, inplace=True)
consume_by_type_count.columns = ['stu_id', 'type_0_count', 'type_1_count', 'type_2_count', 'type_3_count', 'type_4_count', 'type_5_count', 'type_6_count', 'type_8_count']
# max of different consume type
consume_by_type_max = consume_train_test.groupby(['stu_id', 'type'])["amount"].max().unstack().reset_index()
consume_by_type_max.drop([7, 9, 10, 11], axis=1, inplace=True)
consume_by_type_max.columns = ['stu_id', 'type_0_max', 'type_1_max', 'type_2_max', 'type_3_max', 'type_4_max', 'type_5_max', 'type_6_max', 'type_8_max']
# merge the consume_by_type data
consume_by_type = pd.merge(consume_by_type_sum, consume_by_type_count, how='left', on='stu_id')
consume_by_type = pd.merge(consume_by_type, consume_by_type_max, how='left', on='stu_id')
consume_by_type.to_csv("input/processed/consume_by_type.csv", index=False)
# feature 3: extract consume monthly of every student
consume_train_test["when"] = consume_train_test.when.apply(lambda t: datetime.strptime(t,"%Y/%m/%d %H:%M:%S"))
consume_train_test["year"] = consume_train_test.when.apply(lambda t: t.year)
consume_train_test["month"] = consume_train_test.when.apply(lambda t: t.month)
consume_train_test["day"] = consume_train_test.when.apply(lambda t: t.day)
consume_train_test["hour"] = consume_train_test.when.apply(lambda t: t.hour)
consume_train_test.drop(['residual'], axis=1, inplace=True)
consume_train_test.to_csv("input/processed/consume_train_test_timesplit.csv", index=False)
# monthly consume
consume_monthly = pd.DataFrame({"days_in_a_month":consume_train_test.groupby(["stu_id","year","month"])["day"].count(),
"consume_by_month":consume_train_test.groupby(["stu_id","year","month"])["amount"].sum()}).reset_index()
# rule out some vacation months, like summer and winter vacation. including 2014.1,2014.2,2014.7,2014.8,2015.1,2015.2,2015.7,2015.8
for month in [1,2,7,8]:
consume_monthly = consume_monthly[consume_monthly["month"] != month]
# filter the month with a abnormal consume frequency, less than 3 in column "days_in_a_month"
consume_monthly = consume_monthly[consume_monthly["days_in_a_month"] >= 3]
consume_by_month = pd.DataFrame({"avg_consume_monthly": consume_monthly.groupby(["stu_id"])["consume_by_month"].mean(),
"median_consume_monthly": consume_monthly.groupby(["stu_id"])["consume_by_month"].median(),
"max_consume_monthly": consume_monthly.groupby(["stu_id"])["consume_by_month"].max(),
"min_consume_monthly": consume_monthly.groupby(["stu_id"])["consume_by_month"].min(),
"avg_count_consume_monthly": consume_monthly.groupby(["stu_id"])["days_in_a_month"].mean()}).reset_index()
consume_by_month = consume_by_month.astype(int)
consume_by_month.to_csv("input/processed/consume_by_month.csv", index=False)
# feature 4: extract summer vacation consume features
consume_monthly_all = pd.DataFrame({"days_in_a_month":consume_train_test.groupby(["stu_id","year","month"])["day"].count(),
"consume_by_month":consume_train_test.groupby(["stu_id","year","month"])["amount"].sum()}).reset_index()
consume_month_july = consume_monthly_all[consume_monthly_all["month"] == 7]
consume_month_july = consume_month_july[["stu_id", "consume_by_month", "days_in_a_month"]]
consume_month_july.columns = ["stu_id", "consume_july", "count_july"]
consume_month_august = consume_monthly_all[consume_monthly_all["month"] == 8]
consume_month_august = consume_month_august[["stu_id", "consume_by_month", "days_in_a_month"]]
consume_month_august.columns = ["stu_id", "consume_august", "count_august"]
consume_july_august = pd.merge(consume_month_july, consume_month_august, how='left', on='stu_id')
consume_july_august.to_csv("input/processed/consume_july_august.csv", index=False)
# feature 5: extract weekend consume features
consume_train_test["weekday"] = consume_train_test.when.apply(lambda t: t.weekday())
consume_train_test["weekend_or_not"] = consume_train_test["weekday"].apply(lambda i: 1 if i >= 5 else 0)
consume_by_weekend = pd.DataFrame({"consume_weekend_count": consume_train_test.groupby(['stu_id', 'weekend_or_not'])['amount'].count(),
"consume_weekend_sum": consume_train_test.groupby(['stu_id', 'weekend_or_not'])['amount'].sum(),
"consume_weekend_max": consume_train_test.groupby(['stu_id', 'weekend_or_not'])['amount'].max(),
"consume_weekend_mean": consume_train_test.groupby(['stu_id', 'weekend_or_not'])['amount'].mean(),
"consume_weekend_median": consume_train_test.groupby(['stu_id', 'weekend_or_not'])['amount'].median()}).reset_index()
consume_by_weekend = consume_by_weekend[consume_by_weekend["weekend_or_not"] == 1]
consume_by_weekend.drop(["weekend_or_not"], axis=1, inplace=True)
consume_by_weekend.drop(["consume_weekend_mean"], axis=1, inplace=True)
consume_by_weekend.to_csv("input/processed/consume_by_weekend.csv", index=False)
# feature 6: extract some features from dining hall consume data
consume_of_dining = consume_train_test[consume_train_test["type"] == 0]
grouped_dining = pd.DataFrame({"consume_count_dining": consume_of_dining.groupby(['where'])['amount'].count(),
"consume_mean_dining": consume_of_dining.groupby(['where'])['amount'].mean(),
"consume_median_dining": consume_of_dining.groupby(['where'])['amount'].median()}).reset_index()
    sort_by_count = grouped_dining.sort_values("consume_count_dining", ascending=False)
    sort_by_mean = grouped_dining.sort_values("consume_mean_dining", ascending=False)
# pick ten most popular dining halls to extract features
ten_pop_dining = list(sort_by_count["where"][0:10])
# combine the sort_by_mean data and the sort_by_median data, pick seven most expensive dining halls to extract features
seven_exp_dining = list(sort_by_mean["where"][0:7])
dining_of_interest = ten_pop_dining + seven_exp_dining
dining_of_interest_df = consume_of_dining[consume_of_dining["where"].isin(dining_of_interest)]
consume_interest_dining_count = dining_of_interest_df.groupby(['stu_id', 'where'])["amount"].count().unstack().reset_index()
consume_interest_dining_count.columns = ['stu_id', 'place_1155_count', 'place_118_count', 'place_1551_count', 'place_1683_count', 'place_1985_count', 'place_217_count', 'place_232_count', 'place_247_count', 'place_250_count', 'place_272_count', 'place_275_count', 'place_60_count', 'place_61_count', 'place_69_count', 'place_72_count', 'place_83_count', 'place_841_count']
consume_interest_dining_max = dining_of_interest_df.groupby(['stu_id', 'where'])["amount"].max().unstack().reset_index()
consume_interest_dining_max.columns = ['stu_id', 'place_1155_max', 'place_118_max', 'place_1551_max', 'place_1683_max', 'place_1985_max', 'place_217_max', 'place_232_max', 'place_247_max', 'place_250_max', 'place_272_max', 'place_275_max', 'place_60_max', 'place_61_max', 'place_69_max', 'place_72_max', 'place_83_max', 'place_841_max']
consume_interest_dining_mean = dining_of_interest_df.groupby(['stu_id', 'where'])["amount"].mean().unstack().reset_index()
consume_interest_dining_mean.columns = ['stu_id', 'place_1155_mean', 'place_118_mean', 'place_1551_mean', 'place_1683_mean', 'place_1985_mean', 'place_217_mean', 'place_232_mean', 'place_247_mean', 'place_250_mean', 'place_272_mean', 'place_275_mean', 'place_60_mean', 'place_61_mean', 'place_69_mean', 'place_72_mean', 'place_83_mean', 'place_841_mean']
consume_interest_dining_median = dining_of_interest_df.groupby(['stu_id', 'where'])["amount"].median().unstack().reset_index()
consume_interest_dining_median.columns = ['stu_id', 'place_1155_median', 'place_118_median', 'place_1551_median', 'place_1683_median', 'place_1985_median', 'place_217_median', 'place_232_median', 'place_247_median', 'place_250_median', 'place_272_median', 'place_275_median', 'place_60_median', 'place_61_median', 'place_69_median', 'place_72_median', 'place_83_median', 'place_841_median']
consume_interest_dining = pd.merge(consume_interest_dining_count, consume_interest_dining_max, how='left', on='stu_id')
consume_interest_dining = pd.merge(consume_interest_dining, consume_interest_dining_mean, how='left', on='stu_id')
consume_interest_dining = pd.merge(consume_interest_dining, consume_interest_dining_median, how='left', on='stu_id')
consume_interest_dining.to_csv("input/processed/consume_interest_dining.csv", index=False)
# feature 7: extract some features from supermarket consume data
consume_of_supermarket = consume_train_test[consume_train_test["type"] == 1]
supermarket_of_interest = ["188", "190", "192", "219", "248"]
supermarket_of_interest_df = consume_of_supermarket[consume_of_supermarket["where"].isin(supermarket_of_interest)]
consume_interest_super_count = supermarket_of_interest_df.groupby(['stu_id', 'where'])["amount"].count().unstack().reset_index()
consume_interest_super_count.columns = ["stu_id", "place_188_count", "place_190_count", "place_192_count", "place_219_count", "place_248_count"]
consume_interest_super_sum = supermarket_of_interest_df.groupby(['stu_id', 'where'])["amount"].sum().unstack().reset_index()
consume_interest_super_sum.columns = ["stu_id", "place_188_sum", "place_190_sum", "place_192_sum", "place_219_sum", "place_248_sum"]
consume_interest_super = pd.merge(consume_interest_super_count, consume_interest_super_sum, how='left', on='stu_id')
consume_interest_super.to_csv("input/processed/consume_interest_super.csv", index=False)
# feature 8: consume analysis based on time intervals in a day
    consume_train_test_timesplit = pd.read_csv("input/processed/consume_train_test_timesplit.csv")  # api: pandas.read_csv
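# A sketch of how feature 8 ("consume analysis based on time intervals in a day") could
# continue: bucket the hour column into meal-time intervals and aggregate the consume
# amount per student and interval. The interval boundaries and column names below are
# assumptions for illustration, not the original feature definition.
import pandas as pd

def consume_by_time_interval_sketch(consume_train_test_timesplit):
    bins = [0, 6, 10, 14, 18, 24]
    labels = ["night", "breakfast", "lunch", "afternoon", "dinner"]
    df = consume_train_test_timesplit.copy()
    df["interval"] = pd.cut(df["hour"], bins=bins, labels=labels, right=False)
    consume_by_interval = df.groupby(["stu_id", "interval"])["amount"].sum().unstack().reset_index()
    consume_by_interval.columns = ["stu_id"] + ["consume_" + label for label in labels]
    return consume_by_interval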
import numpy as np
import pandas as pd
import joblib
import matplotlib.pyplot as plot
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
from PIL import Image
from sklearn.linear_model import LogisticRegression
import json
st.write("""
# White-or-red-wine
### Which one to choose at all times?
""")
#Carga del modelo guardado
model = joblib.load(open("src/modelo_wine.joblib", "rb"))
def data_preprocesador(df):
"""
función preprocesa la entrada del usuario
return type: pandas dataframe
"""
df.color = df.color.map({'white': 0, 'red': 1})
return df
def visualizacion(prediction_proba):
"""
crear un gráfico de barras de inferencia renderizado con streamlit en tiempo real
return type : matplotlib bar chart
"""
data = (prediction_proba[0]*100).round(2)
grad_percentage = pd.DataFrame(data=data, columns=['Percentage'], index=[
'Bajo', 'Mediano', 'Bueno', 'Excelente'])
ax = grad_percentage.plot(kind='barh', figsize=(
8, 6), color='#FB6942', zorder=30, width=0.5)
ax.legend().set_visible(False)
ax.set_xlim(xmin=0, xmax=100)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
value = ax.get_xticks()
for tick in value:
ax.axvline(x=tick, linestyle='dashed',
alpha=0.9, color='#FB6942', zorder=1)
ax.set_xlabel(" Percentage(%) Confidence Level",
labelpad=2, weight='bold', size=12)
ax.set_ylabel("Wine Quality", labelpad=10, weight='bold', size=12)
ax.set_title('Prediction Confidence Level ', fontdict=None,
loc='center', pad=None, weight='bold')
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot()
return
st.write("""Esta aplicación predice la ** Calidad del vino ** mediante la entrada de ** características del vino ** a través del ** panel lateral ** """)
# Read the wine image and render it with streamlit
image = Image.open('image/blanco-vs-tinto.png')
st.image(image, caption='Tinto o Blanco', use_column_width=True)
codigo = st.expander('¿Necesitas Ayuda? 👉')
with codigo:
st.markdown(
"Encontraras todas la informacion del dataset en [Rusgar](https://github.com/rusgar/White-or-red-wine), estamos para ayudar ")
# Collect the user input parameters via the sidebar
st.sidebar.header('Introduzca sus cualidades')
dataset = st.selectbox('Haz tu eleccion', ('Conjunto', 'White', 'Red'))
def get_data(dataset):
data_wine = pd.read_csv('data/df_wine.csv')
    data_white = pd.read_csv('data/wine_final_white_todo.csv')  # api: pandas.read_csv
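# A sketch of how get_data above might be completed: return the frame that matches the
# sidebar selection. The red-wine path below is a hypothetical name added for
# illustration; only the two reads already shown in the function come from the original.
def get_data_sketch(dataset):
    data_wine = pd.read_csv('data/df_wine.csv')
    data_white = pd.read_csv('data/wine_final_white_todo.csv')
    data_red = pd.read_csv('data/wine_final_red_todo.csv')  # hypothetical path
    if dataset == 'White':
        return data_white
    if dataset == 'Red':
        return data_red
    return data_wine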
#
# Copyright BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
import bz2
import codecs
import csv
import gzip
import os
import tarfile
import zipfile
import pandas
from cliboa.core.validator import EssentialParameters
from cliboa.scenario.base import BaseStep
from cliboa.util.date import DateUtil
from cliboa.util.exception import CliboaException, InvalidCount, InvalidFormat
from cliboa.util.file import File
from cliboa.util.string import StringUtil
class FileBaseTransform(BaseStep):
"""
Base class of file extract classes
"""
def __init__(self):
super().__init__()
self._src_dir = ""
self._src_pattern = ""
self._dest_path = ""
self._dest_dir = None
self._dest_pattern = None
self._encoding = "utf-8"
def src_dir(self, src_dir):
self._src_dir = src_dir
def src_pattern(self, src_pattern):
self._src_pattern = src_pattern
def dest_path(self, dest_path):
self._dest_path = dest_path
def dest_dir(self, dest_dir):
os.makedirs(dest_dir, exist_ok=True)
self._dest_dir = dest_dir
def dest_pattern(self, dest_pattern):
self._dest_pattern = dest_pattern
def encoding(self, encoding):
self._encoding = encoding
def execute(self, *args):
# essential parameters check
valid = EssentialParameters(
self.__class__.__name__, [self._src_dir, self._src_pattern]
)
valid()
files = super().get_target_files(self._src_dir, self._src_pattern)
if len(files) != 1:
raise Exception("Input file must be only one.")
return files[0]
class FileDecompress(FileBaseTransform):
"""
Decompress the specified file
"""
def __init__(self):
super().__init__()
self._chunk_size = None
def chunk_size(self, chunk_size):
self._chunk_size = chunk_size
def execute(self, *args):
files = super().get_target_files(self._src_dir, self._src_pattern)
self._logger.info("Files found %s" % files)
for f in files:
_, ext = os.path.splitext(f)
if ext == ".zip":
self._logger.info("Decompress zip file %s" % f)
with zipfile.ZipFile(f) as zp:
zp.extractall(
self._dest_dir if self._dest_dir is not None else self._src_dir
)
elif ext == ".tar":
self._logger.info("Decompress tar file %s" % f)
with tarfile.open(f, "r:*") as tf:
tf.extractall(
self._dest_dir if self._dest_dir is not None else self._src_dir
)
elif ext == ".bz2":
self._logger.info("Decompress bz2 file %s" % f)
dcom_name = os.path.splitext(os.path.basename(f))[0]
decom_path = (
os.path.join(self._dest_dir, dcom_name)
if self._dest_dir is not None
else os.path.join(self._src_dir, dcom_name)
)
with bz2.open(f, mode="rb") as i, open(decom_path, mode="wb") as o:
while True:
buf = i.read(self._chunk_size)
if buf == b'':
break
o.write(buf)
elif ext == ".gz":
self._logger.info("Decompress gz file %s" % f)
dcom_name = os.path.splitext(os.path.basename(f))[0]
decom_path = (
os.path.join(self._dest_dir, dcom_name)
if self._dest_dir is not None
else os.path.join(self._src_dir, dcom_name)
)
with gzip.open(f, "rb") as i, open(decom_path, "wb") as o:
while True:
buf = i.read(self._chunk_size)
if buf == b'':
break
o.write(buf)
else:
raise CliboaException("Unmatched any available decompress type %s" % f)
class FileCompress(FileBaseTransform):
"""
Compress files
"""
def __init__(self):
super().__init__()
self._format = None
self._chunk_size = None
def format(self, format):
self._format = format.lower()
def chunk_size(self, chunk_size):
self._chunk_size = chunk_size
def execute(self, *args):
# essential parameters check
valid = EssentialParameters(
self.__class__.__name__, [self._src_dir, self._src_pattern, self._format]
)
valid()
files = super().get_target_files(self._src_dir, self._src_pattern)
self._logger.info("Files found %s" % files)
dir = self._dest_dir if self._dest_dir is not None else self._src_dir
for f in files:
if self._format == "zip":
self._logger.info("Compress file %s to zip." % f)
with zipfile.ZipFile(
os.path.join(dir, (os.path.basename(f) + ".zip")),
"w",
zipfile.ZIP_DEFLATED,
) as o:
o.write(f, arcname=os.path.basename(f))
elif self._format in ("gz", "gzip"):
self._logger.info("Compress file %s to gzip." % f)
com_path = os.path.join(dir, (os.path.basename(f) + ".gz"))
with open(f, "rb") as i, gzip.open(com_path, "wb") as o:
while True:
buf = i.read(self._chunk_size)
if buf == b'':
break
o.write(buf)
elif self._format in ("bz2", "bzip2"):
self._logger.info("Compress file %s to bzip2." % f)
com_path = os.path.join(dir, (os.path.basename(f) + ".bz2"))
with open(f, "rb") as i, bz2.open(com_path, "wb") as o:
while True:
buf = i.read(self._chunk_size)
if buf == b'':
break
o.write(buf)
class CsvColsExtract(FileBaseTransform):
"""
Remove columns from csv file.
"""
def __init__(self):
super().__init__()
self._columns = None
def columns(self, columns):
self._columns = columns
def execute(self, *args):
file = super().execute()
valid = EssentialParameters(self.__class__.__name__, [self._columns])
valid()
File().remove_columns(file, self._dest_path, self._columns)
class ColumnLengthAdjust(FileBaseTransform):
"""
Adjust csv (tsv) column to maximum length
"""
def __init__(self):
super().__init__()
self._adjust = {}
def adjust(self, adjust):
self._adjust = adjust
def execute(self, *args):
file = super().execute()
if self._adjust is None:
raise Exception(
"The essential parameter are not specified in %s."
% self.__class__.__name__
)
with codecs.open(file, mode="r", encoding=self._encoding) as fi, codecs.open(
self._dest_path, mode="w", encoding=self._encoding
) as fo:
reader = csv.DictReader(fi)
writer = csv.DictWriter(fo, reader.fieldnames)
writer.writeheader()
for row in reader:
for k, v in self._adjust.items():
f1 = row.get(k)
if len(f1) > v:
row[k] = f1[:v]
writer.writerow(row)
fo.flush()
class DateFormatConvert(FileBaseTransform):
"""
Convert csv (tsv) date field columns to another date field format columns
"""
def __init__(self):
super().__init__()
self._columns = []
self._formatter = None
def columns(self, columns):
self._columns = columns
def formatter(self, formatter):
self._formatter = formatter
def execute(self, *args):
file = super().execute()
valid = EssentialParameters(
self.__class__.__name__, [self._columns, self._formatter]
)
valid()
_, ext = os.path.splitext(file)
if ext == ".csv":
delimiter = ","
elif ext == ".tsv":
delimiter = "\t"
with codecs.open(file, mode="r", encoding=self._encoding) as fi, codecs.open(
self._dest_path, mode="w", encoding=self._encoding
) as fo:
reader = csv.DictReader(fi, delimiter=delimiter)
writer = csv.DictWriter(fo, reader.fieldnames)
writer.writeheader()
date_util = DateUtil()
for row in reader:
for column in self._columns:
r = row.get(column)
if not r:
continue
row[column] = date_util.convert_date_format(r, self._formatter)
writer.writerow(row)
fo.flush()
self._logger.info("Finish %s" % self.__class__.__name__)
class ExcelConvert(FileBaseTransform):
"""
Convert excel to other format
"""
def __init__(self):
super().__init__()
def execute(self, *args):
# essential parameters check
valid = EssentialParameters(
self.__class__.__name__,
[self._src_dir, self._src_pattern, self._dest_dir, self._dest_pattern],
)
valid()
# get a target file
target_files = super().get_target_files(self._src_dir, self._src_pattern)
if len(target_files) == 0:
raise InvalidCount(
"An input file %s does not exist."
% os.path.join(self._src_dir, self._src_pattern)
)
elif len(target_files) > 1:
self._logger.error("Hit target files %s" % target_files)
raise InvalidCount("Input files must be only one.")
self._logger.info(
"A target file to be converted: %s" % os.path.join(target_files[0])
)
# convert
_, dest_ext = os.path.splitext(self._dest_pattern)
if dest_ext != ".csv":
raise InvalidFormat(
"%s is not supported format in %s. The supported format is .csv"
% (dest_ext, self._dest_pattern)
)
        df = pandas.read_excel(target_files[0], encoding=self._encoding)  # api: pandas.read_excel
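# A sketch of how an excel-to-csv step like ExcelConvert above can be finished: write the
# loaded DataFrame to dest_dir/dest_pattern. The helper below is illustrative only; it
# mirrors the parameters validated in the class but is not the original implementation.
import os
import pandas

def excel_to_csv_sketch(src_file, dest_dir, dest_pattern, encoding="utf-8"):
    df = pandas.read_excel(src_file)
    dest_path = os.path.join(dest_dir, dest_pattern)
    df.to_csv(dest_path, index=False, encoding=encoding)
    return dest_path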
import pandas as pd
from scipy.spatial.distance import cdist
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
def read_aff_elements(path, *args, **kwargs):
    return pd.read_csv(filepath_or_buffer=path, *args, **kwargs)  # api: pandas.read_csv
# coding: utf-8
from sklearn.ensemble import IsolationForest
import os
import gc
import time
import json
import warnings
import logging
import numpy as np
import pandas as pd
from sklearn import preprocessing
from util import DatasetStatistics
from sklearn.base import BaseEstimator, TransformerMixin
__author__ = "<NAME>, <NAME>"
class BlobPreprocessor(BaseEstimator, TransformerMixin):
def __init__(self, selected_attributes=None, seed=23, score_outliers=False, remove_outliers=True,
remove_poor_quality=True, outlier_fraction=0.005, isolation_tree_sample=2000,
isolation_forest_size=200, validation_data=None, n_jobs=-1):
self.selected_attributes = selected_attributes
self.seed = seed
self.score_outliers = score_outliers
self.remove_outliers = remove_outliers
self.remove_poor_quality = remove_poor_quality
self.outlier_fraction = outlier_fraction
self.isolation_tree_sample = isolation_tree_sample
self.isolation_forest_size = isolation_forest_size
self.n_jobs = n_jobs
self.validation_data = validation_data
self.isolation_forest = None
self.column_names = None
def __repr__(self):
result = []
for attr, value in iter(self.__dict__.items()):
result.append("=".join([attr, str(value)]))
return '%s(%s)' % (self.__class__.__name__, ", ".join(sorted(result)))
def fit(self, X, y=None):
self.isolation_forest = IsolationForest(n_estimators=self.isolation_forest_size,
max_samples=self.isolation_tree_sample,
contamination=self.outlier_fraction,
random_state=self.seed,
n_jobs=self.n_jobs)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if self.remove_outliers:
self._remove_outliers(X, y)
elif self.score_outliers:
self._score_outliers(X, y)
if self.remove_poor_quality:
self._filter_low_quality_examples(X, y)
return self
def transform(self, X, y=None):
Xt = X
if self.score_outliers:
anomaly_scores = self.isolation_forest.decision_function(X)
if self.selected_attributes is not None:
Xt = X.loc[:, self.selected_attributes]
if self.score_outliers:
Xt.loc[:, "anomaly_score"] = anomaly_scores
self.column_names = Xt.columns.values
return Xt
def _filter_low_quality_examples(self, X, y):
if self.validation_data is not None:
validation_df = read_validation_data(self.validation_data)
# repeated title, but different values -> multiple conformations
is_duplicate_title = validation_df.duplicated(subset="title", keep=False)
validation_df.set_index("title", inplace=True)
nonidentical_duplicates = validation_df[is_duplicate_title.values].index
multiple_conformations = nonidentical_duplicates[nonidentical_duplicates.isin(X.index)]
multiple_conformations_num = multiple_conformations.shape[0]
if multiple_conformations_num > 0:
logging.info("Removing %s examples with multiple conformations from training data",
str(multiple_conformations_num))
X.drop(multiple_conformations, inplace=True)
y.drop(multiple_conformations, inplace=True)
def _score_outliers(self, X, y):
self.isolation_forest.fit(X)
def _remove_outliers(self, X, y):
gc.collect()
self._score_outliers(X, y)
outlier_pred = self.isolation_forest.predict(X)
outliers = X[outlier_pred == -1].index
outlier_num = outliers.shape[0]
gc.collect()
if outlier_num > 0:
logging.info("Removing %s outliers from training data", str(outlier_num))
X.drop(outliers, inplace=True)
y.drop(outliers, inplace=True)
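# Minimal self-contained sketch of the outlier-removal idea used by
# BlobPreprocessor above: fit an IsolationForest and drop rows predicted as
# outliers (-1). The synthetic data and parameter values are assumptions.
def _example_isolation_forest_filter():
    import numpy as np
    import pandas as pd
    from sklearn.ensemble import IsolationForest

    rng = np.random.RandomState(23)
    X = pd.DataFrame(rng.normal(size=(500, 5)),
                     columns=["f%d" % i for i in range(5)])
    forest = IsolationForest(n_estimators=100, contamination=0.01,
                             random_state=23)
    forest.fit(X)
    keep = forest.predict(X) == 1  # 1 = inlier, -1 = outlier
    return X[keep]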
class DatasetCleaner:
MAX_R_FACTOR = 0.3
MIN_OCCUPANCY = 0.3
POLY_THRESHOLD = 30
MIN_RSCC = 0.6
UNKNOWN_LIGANDS = ["UNK", "UNX", "UNL", "DUM"]
ANY_NUCLEOTYDE = ["N"]
UNLABELED = ["BLOB", "", "?"]
PEPTIDES_DNA_RNA = {'ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',
'MSE', 'PHE', 'PRO', 'SEC', 'SER', 'THR', 'TRP', 'TYR', 'VAL',
'DA', 'DG', 'DT', 'DC', 'DU',
'A', 'G', 'T', 'C', 'U', }
WATER = ["HOH", "H20", "WAT", "DOD"]
IGNORED_RES_NAMES = set().union(UNKNOWN_LIGANDS, ANY_NUCLEOTYDE, UNLABELED, PEPTIDES_DNA_RNA, WATER)
KEY_ATTRIBUTE = "title"
PDBID_ATTRIBUTE = "pdb_code"
ELECTRON_ATTRIBUTE = "part_00_electrons"
ILLEGAL_ATTRIBUTES = ["pdb_code", "res_id", "chain_id",
"local_res_atom_count", "local_res_atom_non_h_count",
"local_res_atom_non_h_occupancy_sum",
"local_res_atom_non_h_electron_sum",
"local_res_atom_non_h_electron_occupancy_sum",
"local_res_atom_C_count",
"local_res_atom_N_count", "local_res_atom_O_count",
"local_res_atom_S_count",
"dict_atom_non_h_count",
"dict_atom_non_h_electron_sum", "dict_atom_C_count",
"dict_atom_N_count", "dict_atom_O_count",
"dict_atom_S_count",
"fo_col", "fc_col", "weight_col", "grid_space",
"solvent_radius",
"solvent_opening_radius",
"part_step_FoFc_std_min", "part_step_FoFc_std_max",
"part_step_FoFc_std_step",
"local_volume", "res_coverage", "blob_coverage",
"blob_volume_coverage", "blob_volume_coverage_second",
"res_volume_coverage", "res_volume_coverage_second",
"skeleton_data"
]
GLOBALS = ["resolution_max_limit", "part_step_FoFc_std_min",
"part_step_FoFc_std_max", "part_step_FoFc_std_step"]
GRAPH_ATTRIBUTES = ["local_maxi_graph_low_cycles", "local_maxi_graph_low_cycle_6", "local_maxi_graph_low_cycle_7",
"local_maxi_graph_low_cycle_5", "local_maxi_graph_low_closeness_000_002",
"local_maxi_graph_low_closeness_002_004", "local_maxi_graph_low_closeness_004_006",
"local_maxi_graph_low_closeness_006_008", "local_maxi_graph_low_closeness_008_010",
"local_maxi_graph_low_closeness_010_012", "local_maxi_graph_low_closeness_012_014",
"local_maxi_graph_low_closeness_014_016", "local_maxi_graph_low_closeness_016_018",
"local_maxi_graph_low_diameter", "local_maxi_graph_low_radius",
"local_maxi_graph_low_deg_5_plus", "local_maxi_graph_low_density",
"local_maxi_graph_low_periphery", "local_maxi_graph_low_graph_clique_number",
"local_maxi_graph_low_nodes", "local_maxi_graph_low_avg_degree", "local_maxi_graph_low_edges",
"local_maxi_graph_low_average_clustering", "local_maxi_graph_low_center",
"local_maxi_graph_low_deg_4", "local_maxi_graph_low_deg_0",
"local_maxi_graph_low_deg_1", "local_maxi_graph_low_deg_2", "local_maxi_graph_low_deg_3",
"local_maxi_graph_hi_cycles", "local_maxi_graph_hi_cycle_6", "local_maxi_graph_hi_cycle_7",
"local_maxi_graph_hi_cycle_5", "local_maxi_graph_hi_closeness_000_002",
"local_maxi_graph_hi_closeness_002_004", "local_maxi_graph_hi_closeness_004_006",
"local_maxi_graph_hi_closeness_006_008", "local_maxi_graph_hi_closeness_008_010",
"local_maxi_graph_hi_closeness_010_012", "local_maxi_graph_hi_closeness_012_014",
"local_maxi_graph_hi_closeness_014_016", "local_maxi_graph_hi_closeness_016_018",
"local_maxi_graph_hi_diameter", "local_maxi_graph_hi_radius",
"local_maxi_graph_hi_deg_5_plus", "local_maxi_graph_hi_density",
"local_maxi_graph_hi_periphery", "local_maxi_graph_hi_graph_clique_number",
"local_maxi_graph_hi_nodes", "local_maxi_graph_hi_avg_degree", "local_maxi_graph_hi_edges",
"local_maxi_graph_hi_average_clustering", "local_maxi_graph_hi_center",
"local_maxi_graph_hi_deg_4", "local_maxi_graph_hi_deg_0",
"local_maxi_graph_hi_deg_1", "local_maxi_graph_hi_deg_2", "local_maxi_graph_hi_deg_3",
]
def __init__(self, data_frame, class_attribute="res_name", filter_examples=False, unique_attributes=None,
max_num_of_classes=200, min_examples_per_class=None, drop_attributes=ILLEGAL_ATTRIBUTES + GLOBALS,
select_attributes=None, where_title=None, sort_by_title=None, seed=23, drop_parts=range(3, 10),
validation_data=None, remove_poor_quality_data=True, keep=None, remove_poorly_covered=True,
blob_coverage_threshold=0.1, res_coverage_threshold=0.2, twilight_data=None, combine_ligands=True,
remove_symmetry_ligands=True, ligand_selection=None, discretize_add_noise=False,
discretize_round_noise=False, min_electron_pct=0.5, nonH_atom_range=None, resolution_range=(0, 4),
non_xray_pdb_list=None, edstats_data=None, min_ZOa=None, max_ZDa=None, training_data=True):
"""
Initializes a new preprocessor object with the specified settings.
"""
self.data_frame = data_frame
self.class_series = None
self.filter_examples = filter_examples
self.unique_attributes = unique_attributes
self.max_num_of_classes = max_num_of_classes
self.min_examples_per_class = min_examples_per_class
self.drop_attributes = drop_attributes
self.select_attributes = select_attributes
self.where_title = where_title
self.sort_by_title = sort_by_title
self.label_encoder = preprocessing.LabelEncoder()
self.class_attribute = class_attribute
self.seed = seed
self.drop_parts = drop_parts
self.validation_data = validation_data
self.remove_poor_quality_data = remove_poor_quality_data
self.keep = keep
self.remove_poorly_covered = remove_poorly_covered
self.blob_coverage_threshold = blob_coverage_threshold
self.res_coverage_threshold = res_coverage_threshold
self.twilight_data = twilight_data
self.combine_ligands = combine_ligands
self.remove_symmetry_ligands = remove_symmetry_ligands
self.ligand_selection = ligand_selection
self.discretize_add_noise = discretize_add_noise
self.discretize_round_noise = discretize_round_noise
self.min_electron_pct = min_electron_pct
self.nonH_atom_range = nonH_atom_range
self.resolution_range = resolution_range
self.non_xray_pdb_list = non_xray_pdb_list
self.edstats_data = edstats_data
self.min_ZOa = min_ZOa
self.max_ZDa = max_ZDa
self.training_data = training_data
self.clean()
@property
def classes(self):
"""
Classes of the dataset.
"""
if len(self.label_encoder.classes_) > 0:
return self.label_encoder.classes_
else:
raise Exception("Data not prepared yet!")
def __repr__(self):
result = []
for attr, value in iter(self.__dict__.items()):
if attr == "where_title" and self.where_title is not None:
result.append("=".join([attr, str(True)]))
elif attr == "drop_attributes":
if self.drop_attributes == self.ILLEGAL_ATTRIBUTES + self.GLOBALS:
result.append("=".join([attr, "ILLEGAL_ATTRIBUTES+GLOBALS"]))
else:
result.append("=".join([attr, str(value)]))
elif attr != "data_frame" and attr != "class_series":
result.append("=".join([attr, str(value)]))
return '%s(%s)' % (self.__class__.__name__, ", ".join(sorted(result)))
def clean(self):
logging.info("Cleaning data...")
if self.training_data:
logging.info("Initial dataset:\r\n%s", DatasetStatistics(self.data_frame,
self.data_frame.loc[:, self.class_attribute]))
try:
non_xray_df = read_non_xray_pdb_list(self.non_xray_pdb_list)
non_xray = self.data_frame[(self.data_frame[self.PDBID_ATTRIBUTE].isin(non_xray_df.loc[:, "pdbid"]))]
non_xray_num = non_xray.shape[0]
non_xray_unique = len(pd.unique(non_xray.loc[:, self.PDBID_ATTRIBUTE]))
if non_xray_num > 0:
logging.info(
"Removing %s examples taken from PDB entries with experimental methods other tha X-ray "
"diffraction (%s non-xray PDB files)", str(non_xray_num), str(non_xray_unique))
self.data_frame = self.data_frame.drop(non_xray.index)
        except Exception:
            logging.warning("Could not find list of non-xray pdb files")
        # note: data_frame column is 'pdb_code', the non-xray list column is 'pdbid'
no_electrons = self.data_frame[~(self.data_frame[self.ELECTRON_ATTRIBUTE] > 0)].index
no_electrons_num = no_electrons.shape[0]
if no_electrons_num > 0:
logging.info("Removing %s examples with no electron density", str(no_electrons_num))
self.data_frame = self.data_frame.drop(no_electrons)
if self.training_data:
if self.where_title is not None:
self.data_frame = self.data_frame.loc[self.data_frame[self.KEY_ATTRIBUTE].isin(self.where_title), :]
if self.sort_by_title:
self.data_frame = self.data_frame.sort_values(by=self.KEY_ATTRIBUTE)
if self.filter_examples:
self._drop_duplicates(self.unique_attributes, self.keep)
if self.remove_poorly_covered:
self._filter_poorly_covered_examples()
self._filter_examples(self.max_num_of_classes, self.min_examples_per_class, self.ligand_selection,
self.nonH_atom_range, self.resolution_range)
self.data_frame.set_index(self.KEY_ATTRIBUTE, inplace=True)
self._drop_attributes(self.drop_attributes)
self._drop_parts(self.drop_parts)
self._feature_engineering()
self._discretize(add_noise=self.discretize_add_noise, round_noise=self.discretize_round_noise)
self._zero_nas()
self._convert_columns_to_floats()
self._select_attributes(self.select_attributes)
if self.training_data:
logging.info("Dataset after preprocessing:\r\n%s",
DatasetStatistics(self.data_frame, self.data_frame.loc[:, self.class_attribute]))
def prepare_for_classification(self, selected_class_attribute, all_class_attributes=None):
"""
Prepares the dataset for training and/or testing
:param selected_class_attribute: the attribute that will be used during learning
:type selected_class_attribute: str
:param all_class_attributes: all possible class attributes in the dataset. If not given defaults
to the selected class attribute. This list ensures that attributes that contain direct information about the
true class will not be used during training.
:type all_class_attributes: list of str
:return: the dataset divided into a data frame with unlabeled examples and a data frame (series) only with
labels
"""
gc.collect()
logging.info("Preparing data for classification...")
if all_class_attributes is None:
all_class_attributes = [selected_class_attribute]
self.class_series = self.data_frame[selected_class_attribute]
drop_class_attributes = [attr for attr in all_class_attributes if attr in self.data_frame.columns]
self.data_frame = self.data_frame.drop(drop_class_attributes, axis=1)
self.label_encoder.fit(self.class_series)
labels_frame = pd.Series(self.label_encoder.transform(self.class_series), self.data_frame.index)
gc.collect()
return self.data_frame, labels_frame
def _feature_engineering(self):
delta_attributes = ["electrons", "std", "skewness", "mean", "volume", "parts", "shape_segments_count",
"density_segments_count", "density_sqrt_E1", "density_sqrt_E2", "density_sqrt_E3"]
for i in range(1, 3):
for delta_attribute in delta_attributes:
try:
self.data_frame.loc[:, "delta_" + delta_attribute + "_" + str(i)] = \
self.data_frame.loc[:, "part_0" + str(i - 1) + "_" + delta_attribute] - \
self.data_frame.loc[:, "part_0" + str(i) + "_" + delta_attribute]
gc.collect()
except Exception as ex:
logging.warning("Feature engineering: %s", ex)
over_attributes = ["electrons", "std", "skewness", "shape_segments_count", "density_segments_count"]
for i in range(0, 3):
for over_attribute in over_attributes:
try:
self.data_frame.loc[:, over_attribute + "_over_volume_0" + str(i)] = \
self.data_frame.loc[:, "part_0" + str(i) + "_" + over_attribute] / \
self.data_frame.loc[:, "part_0" + str(i) + "_volume"]
gc.collect()
except Exception as ex:
logging.warning("Feature engineering: %s", ex)
over_attributes = ["volume", "electrons", "std", "skewness"]
for i in range(0, 3):
for over_attribute in over_attributes:
try:
self.data_frame.loc[:, over_attribute + "_over_resolution_0" + str(i)] = \
self.data_frame.loc[:, "part_0" + str(i) + "_" + over_attribute] / \
self.data_frame.loc[:, "resolution"]
gc.collect()
except Exception as ex:
logging.warning("Feature engineering: %s", ex)
self.data_frame.loc[:, "percent_cut"] = self.data_frame.loc[:, "local_cut_by_mainchain_volume"] / \
(self.data_frame.loc[:, "local_cut_by_mainchain_volume"] +
self.data_frame.loc[:, "part_00_volume"])
gc.collect()
def _discretize(self, add_noise=False, round_noise=False):
self.data_frame.loc[:, "resolution"] = self.data_frame.loc[:, "resolution"].round(decimals=1)
self.data_frame.loc[:, "local_std"] = self.data_frame.loc[:, "local_std"].round(decimals=2)
if add_noise:
noise = np.random.uniform(-0.15, 0.15, self.data_frame.shape[0])
if round_noise:
noise = noise.round(decimals=1)
self.data_frame.loc[:, "resolution"] = self.data_frame.loc[:, "resolution"] + noise
noise = np.random.uniform(-0.015, 0.015, self.data_frame.shape[0])
if round_noise:
noise = noise.round(decimals=2)
self.data_frame.loc[:, "local_std"] = self.data_frame.loc[:, "local_std"] + noise
def _drop_duplicates(self, subset, keep="largest"):
# Leave one row from a set of identical rows
self.data_frame = self.data_frame.drop_duplicates(keep="first")
if subset is None or not subset or keep is None:
return
if keep == "largest":
if subset != [self.KEY_ATTRIBUTE]:
warnings.warn("Leaving largest volume when filtering by something different than the key attribute")
self.data_frame = self.data_frame.sort_values(by=[self.KEY_ATTRIBUTE, "part_00_volume"], ascending=False)
self.data_frame = self.data_frame.drop_duplicates(subset=subset, keep="first")
else:
self.data_frame = self.data_frame.drop_duplicates(subset=subset, keep=keep)
def _filter_poorly_covered_examples(self):
poorly_covered = self.data_frame[self.data_frame.blob_volume_coverage < self.blob_coverage_threshold].index
poorly_covered_num = poorly_covered.shape[0]
if poorly_covered_num > 0:
logging.info("Removing %s examples with blobs covered by the model below %s%%", str(poorly_covered_num),
str(self.blob_coverage_threshold * 100))
self.data_frame = self.data_frame.drop(poorly_covered)
res_poorly_covered = self.data_frame[self.data_frame.res_volume_coverage < self.res_coverage_threshold].index
res_poorly_covered_num = res_poorly_covered.shape[0]
if res_poorly_covered_num > 0:
logging.info("Removing %s examples with models covered by the blob below %s%%", str(res_poorly_covered_num),
str(self.res_coverage_threshold * 100))
self.data_frame = self.data_frame.drop(res_poorly_covered)
def _res_threshold_attributes(self, attributes, res_threshold, dummy_value=-10):
res_thresholded = self.data_frame.resolution > res_threshold
res_thresholded_num = np.sum(res_thresholded)
if res_thresholded_num > 0:
logging.info("Thresholding graph attributes for %d examples" % res_thresholded_num)
self.data_frame.loc[res_thresholded, attributes] = dummy_value
def _calculate_top_n_classes(self, max_num_of_classes):
"""
        Calculates the top n most frequent classes.
:param max_num_of_classes: maximum number of expected classes
:type max_num_of_classes: int
:return: list of max_num_of_classes classes
"""
if max_num_of_classes <= 0:
raise Exception("The number of classes cannot be smaller than 1!")
gc.collect()
classes = self.data_frame.loc[:, self.class_attribute].copy()
gc.collect()
res_name_count = classes.value_counts()
gc.collect()
res_name_count.sort_values(inplace=True, ascending=False)
        if len(res_name_count) < max_num_of_classes:
raise Exception("Not enough classes in the dataset!")
return res_name_count.iloc[:max_num_of_classes].index.values
def _filter_examples(self, max_num_of_classes, min_examples_per_class, ligand_selection, nonH_atom_range,
resolution_range):
"""
Excludes unnecessary and underrepresented class examples from the dataset.
:param max_num_of_classes: maximum number of expected classes after filtering (used when
min_examples_per_class=None)
:type max_num_of_classes: int
        :param min_examples_per_class: minimum number of examples per class (underrepresented classes will be
            filtered out)
:type min_examples_per_class: int
"""
if nonH_atom_range is not None:
min_atoms = nonH_atom_range[0]
max_atoms = nonH_atom_range[1]
poor_atom_count = (self.data_frame.dict_atom_non_h_count < min_atoms) | \
(self.data_frame.dict_atom_non_h_count > max_atoms)
poor_atom_count_num = np.sum(poor_atom_count)
if poor_atom_count_num > 0:
logging.info("Removing %s examples with non-H atom count < %s or > %s",
str(poor_atom_count_num), str(min_atoms), str(max_atoms))
self.data_frame = self.data_frame.drop(self.data_frame[poor_atom_count].index)
if resolution_range is not None:
min_res = resolution_range[0]
max_res = resolution_range[1]
poor_res = (self.data_frame.resolution < min_res) | (self.data_frame.resolution > max_res)
poor_res_num = np.sum(poor_res)
if poor_res_num > 0:
logging.info("Removing %s examples with resolution < %s or > %s",
str(poor_res_num), str(min_res), str(max_res))
self.data_frame = self.data_frame.drop(self.data_frame[poor_res].index)
# Potential res_name N/A problem
if np.sum(self.data_frame.loc[:, self.class_attribute].isnull()) > 0:
raise Exception("N/A class values in dataset!")
if ligand_selection is not None:
logging.info("Selecting only user-defined ligands")
self.data_frame = self.data_frame[self.data_frame.loc[:, self.class_attribute].isin(ligand_selection)]
return
# non-ligands and unknown ligands
ignored_ligands = self.data_frame.loc[:, self.class_attribute].isin(self.IGNORED_RES_NAMES)
logging.info("Removing %d unknown and non-ligand structures", np.sum(ignored_ligands))
self.data_frame = self.data_frame[~ignored_ligands]
if self.combine_ligands:
logging.info("Creating ligand complexes")
res_names = self.data_frame.loc[:, self.class_attribute].unique()
self.data_frame.loc[:, self.class_attribute] = self.data_frame.apply(self._detect_polyligand, axis=1,
args=(res_names, self.POLY_THRESHOLD,
self.IGNORED_RES_NAMES))
mislabeled_ligands = self.data_frame[self.class_attribute] == "_DELETE_"
mislabeled_ligands_num = np.sum(mislabeled_ligands)
if mislabeled_ligands_num > 0:
logging.info("Removing %d poorly covered ligand complexes", mislabeled_ligands_num)
self.data_frame = self.data_frame.drop(self.data_frame[mislabeled_ligands].index)
if self.remove_symmetry_ligands:
self.data_frame.loc[:, "is_symmetry_ligand"] = self.data_frame.res_coverage.str[1:-1].apply(
lambda x: self._detect_symmetry_ligands(x)
)
symmetry_ligand_num = np.sum(self.data_frame.is_symmetry_ligand)
if symmetry_ligand_num > 0:
logging.info("Removing %d ligands centered in a symmetry", symmetry_ligand_num)
self.data_frame = self.data_frame.drop(self.data_frame[self.data_frame.is_symmetry_ligand].index)
self.data_frame = self.data_frame.drop("is_symmetry_ligand", axis=1)
if self.remove_poor_quality_data:
# removing examples with extremely poor quality
            # ligands modelled without over half of the non-H electrons
poor_electrons = self.data_frame.local_res_atom_non_h_electron_sum \
< self.min_electron_pct * self.data_frame.dict_atom_non_h_electron_sum
poor_electrons_num = np.sum(poor_electrons)
if poor_electrons_num > 0:
logging.info("Removing %s ligands without over %d %% of non-H electrons modeled",
str(poor_electrons_num), int(100 * self.min_electron_pct))
self.data_frame = self.data_frame.drop(self.data_frame[poor_electrons].index)
if self.validation_data is not None:
# repeated title, but different values -> multiple conformations
validation_df = read_validation_data(self.validation_data)
validation_df = validation_df.drop_duplicates(subset="title", keep=False)
joined_data = self.data_frame.merge(validation_df, on="title", how="left")
poor_r_factor = joined_data.EDS_R > self.MAX_R_FACTOR
poor_occupancy = joined_data.avgoccu < self.MIN_OCCUPANCY
poor_rscc = joined_data.rscc < self.MIN_RSCC
poor_quality = joined_data[poor_r_factor | poor_occupancy | poor_rscc]
poor_r_factor_num = np.sum(poor_r_factor)
poor_occupancy_num = np.sum(poor_occupancy)
poor_rscc_num = np.sum(poor_rscc)
poor_quality_num = poor_quality.shape[0]
if poor_quality_num > 0:
logging.info("Removing %d examples with R > %s (%d) or occupancy < %s (%d) or RSCC < %s (%d)",
poor_quality_num, self.MAX_R_FACTOR, poor_r_factor_num,
self.MIN_OCCUPANCY, poor_occupancy_num, self.MIN_RSCC, poor_rscc_num)
self.data_frame = self.data_frame[~self.data_frame.title.isin(poor_quality.title)]
if self.twilight_data is not None:
# repeated title, but different values -> multiple conformations
twilight_df = read_twilight_data(self.twilight_data)
twilight_df = twilight_df.drop_duplicates(subset="title", keep=False)
joined_data = self.data_frame.merge(twilight_df, on="title", how="left")
poor_quality = joined_data[joined_data.Valid == "Y"]
poor_quality_num = poor_quality.shape[0]
if poor_quality_num > 0:
logging.info("Removing %d examples flagged by Twilight (%s)", poor_quality_num,
os.path.basename(self.twilight_data))
self.data_frame = self.data_frame[~self.data_frame.title.isin(poor_quality.title)]
if self.edstats_data is not None:
edstats_df = read_edstats_data(self.edstats_data)
edstats_df = edstats_df.drop_duplicates(subset="title", keep=False)
joined_data = self.data_frame.merge(edstats_df, on="title", how="left")
poor_ZOa = joined_data.ZOa < self.min_ZOa
poor_ZDa = joined_data.ZDa >= self.max_ZDa
poor_quality = joined_data[poor_ZOa | poor_ZDa]
poor_ZOa_num = np.sum(poor_ZOa)
poor_ZDa_num = np.sum(poor_ZDa)
poor_quality_num = poor_quality.shape[0]
if poor_quality_num > 0:
logging.info("Removing %d examples with ZOa < %s (%d) "
"or ZDa >= %s (%d)",
poor_quality_num, self.min_ZOa, poor_ZOa_num,
self.max_ZDa, poor_ZDa_num)
self.data_frame = self.data_frame[~self.data_frame.title.isin(poor_quality.title)]
# minimal number of examples/maximum number of classes
if min_examples_per_class is not None:
logging.info("Limiting dataset to classes with at least %d examples", min_examples_per_class)
count_attribute = self.class_attribute + "_count"
res_name_count = self.data_frame.loc[:, self.class_attribute].value_counts()
self.data_frame.loc[:, count_attribute] = self.data_frame.loc[:, self.class_attribute] \
.map(res_name_count).astype(int)
self.data_frame = self.data_frame[self.data_frame[count_attribute] >= min_examples_per_class]
self.data_frame = self.data_frame.drop(count_attribute, axis=1)
else:
logging.info("Limiting dataset to %d most popular classes", max_num_of_classes)
gc.collect()
top_n_classes = self._calculate_top_n_classes(max_num_of_classes)
gc.collect()
classes = self.data_frame.loc[:, self.class_attribute].copy()
isin_vector = classes.isin(top_n_classes)
gc.collect()
self.data_frame = self.data_frame[isin_vector]
gc.collect()
def _drop_attributes(self, attributes):
"""
Drops selected attributes and makes the data frame smaller.
:param attributes: list of attributes to be dropped
:type attributes: list of str
"""
if attributes is not None:
self.data_frame = self.data_frame.drop(attributes, axis=1)
def _drop_parts(self, parts):
"""
Drops selected parts and makes the data frame smaller.
:param parts: list of part numbers
:type parts: list of int
"""
if parts is not None:
attributes = []
for part in parts:
attributes.extend([col for col in list(self.data_frame) if col.startswith("part_0" + str(part))])
self.data_frame = self.data_frame.drop(attributes, axis=1)
def _select_attributes(self, attributes):
"""
Leaves selected attributes and makes the data frame smaller.
:param attributes: list of attributes to be left after filtering
:type attributes: list of str
"""
if attributes is not None:
self.data_frame = self.data_frame.loc[:, attributes]
def _zero_nas(self):
"""
Turns NA values into zeros.
"""
self.data_frame = self.data_frame.fillna(0.0)
def _convert_columns_to_floats(self):
"""
Converts all columns into floats and limits their values to the float range.
"""
tmp_frame = self.data_frame.loc[:, [self.class_attribute]]
for col in list(tmp_frame):
try:
self.data_frame = self.data_frame.drop(col, axis=1)
except ValueError as ex:
logging.warning(ex)
self.data_frame[self.data_frame > 10e32] = 10e32
self.data_frame[self.data_frame < -10e32] = -10e32
self.data_frame = self.data_frame.astype("float32")
self.data_frame = pd.concat([self.data_frame, tmp_frame], axis=1)
@staticmethod
def _combine_symmetries(res_coverage_dict):
new_dict = dict()
for key, value in res_coverage_dict.items():
modified_key = ":".join(key.split(":")[:-1])
new_dict[modified_key] = max(new_dict.setdefault(modified_key, value), value)
return new_dict
@staticmethod
def _detect_symmetry_ligands(res_coverage_col):
new_res_dict = dict()
res_coverage_dict = json.loads(res_coverage_col)
for key, value in res_coverage_dict.items():
modified_key = ":".join(key.split(":")[:-1])
new_res_dict[modified_key] = max(new_res_dict.setdefault(modified_key, value), value)
return len(new_res_dict) == 1 and len(res_coverage_dict) > 1
@staticmethod
def _detect_polyligand(row, res_names, coverage_threshold, ignored_res):
from operator import itemgetter
res_coverage_col = row["res_coverage"][1:-1]
res_coverage_dict = DatasetCleaner._combine_symmetries(json.loads(res_coverage_col))
sorted_keys = sorted(((k.split(":")[0], v) for k, v in res_coverage_dict.items()
if (k.split(":")[0] not in ignored_res)))
if len(sorted_keys) == 1:
return sorted_keys[0][0]
else:
new_res_name = "_".join((k for k, v in sorted_keys if (v >= coverage_threshold)))
if new_res_name == "":
new_res_name = max(sorted_keys, key=itemgetter(1))[0]
if "_" not in new_res_name and new_res_name not in res_names:
new_res_name = "_DELETE_"
return new_res_name
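# Hedged usage sketch for DatasetCleaner (the file name and the exact columns
# of the summary csv are assumptions): read a ligand summary table with
# read_dataset (defined just below), clean it, and split it into a feature
# frame plus encoded labels ready for a classifier.
def _example_clean_and_encode(path="all_summary.csv"):
    df = read_dataset(path)
    cleaner = DatasetCleaner(df,
                             class_attribute="res_name",
                             filter_examples=True,
                             unique_attributes=["title"],
                             max_num_of_classes=200)
    X, y = cleaner.prepare_for_classification("res_name")
    return X, y, cleaner.classes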
def read_dataset(path):
logging.info("Reading: %s", os.path.basename(path))
start = time.time()
df_header = pd.read_csv(path, sep=";", header=0, na_values=["n/a", "nan", ""],
keep_default_na=False, engine="c", nrows=1)
string_cols = ["title", "pdb_code", "res_id", "res_name", "chain_id", "fo_col", "fc_col", "res_coverage",
"blob_coverage", "skeleton_data"]
float_cols = {c: np.float64 for c in list(df_header) if c not in string_cols}
for string_col in string_cols:
float_cols[string_col] = "str"
df = pd.read_csv(path, sep=";", header=0, na_values=["n/a", "nan", ""],
keep_default_na=False, engine="c", dtype=float_cols)
logging.info("Read dataset in: %.2f seconds", time.time() - start)
return df
def read_merge_datasets(paths):
dfs = []
for path in paths:
dfs.append(read_dataset(path))
return | pd.concat(dfs, ignore_index=True) | pandas.concat |
'''
Expression.py - wrap various differential expression tools
===========================================================
:Tags: Python
Purpose
-------
This module provides tools for differential expression analysis
using a variety of methods.
Methods implemented are:
DESeq
EdgeR
ttest
The aim of this module is to run these individual tools and
output a table in a common format.
Usage
-----
Documentation
-------------
Requirements:
* DESeq >= 1.17
* DESeq2 >= 1.5.62
* edgeR >= 3.7.16
* gplots >= 2.14.2
* ggplot2 >= 1.0.0
* reshape >= 0.8.5
* RColorBrewer >= 1.0.5
* grid >= 3.1.1
* limma >= 3.21.18
* samr >= 2.0 (optional)
* siggenes >= 1.39.0 (optional)
Code
----
To do:
--check contrasts against design model
'''
import os
import math
import numpy
import sys
import collections
import itertools
import re
import pandas
import copy
import numpy as np
from scipy.stats import ttest_ind
import matplotlib
import matplotlib.pyplot as plt
import rpy2
from rpy2.robjects import r
from rpy2.robjects import r as R
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
try:
from rpy2.rinterface import RRuntimeError
except ImportError:
from rpy2.rinterface_lib.embedded import RRuntimeError
try:
import cgatcore.experiment as E
import cgatcore.iotools as iotools
import cgat.Stats as Stats
except ImportError:
import experiment as E
import iotools
import Stats
# activate pandas/rpy conversion
# pandas2ri.activate()
# AH: Only do this on demand, module might not be
# be able to be imported if there are any issues.
# grdevices = importr('grDevices')
def runDETest(raw_DataFrame,
design_file,
outfile,
de_caller,
**kwargs):
''' provide higher level API to run tools with default setting '''
if de_caller.lower() == "deseq":
pass
else:
raise ValueError("Unknown caller")
def splitModel(model):
'''returns the terms in the model'''
    return [x for x in
            re.split(r"[\.:,~+\s*]", re.sub(r"~(\s*)?", "", model)) if
            len(x) > 0]
def adjustPvalues(p_values):
'''return a list of BH adjusted pvalues'''
# import r stats module to adjust pvalues
stats = importr('stats')
adj_pvalues = list(stats.p_adjust(FloatVector(p_values), method='BH'))
return adj_pvalues
def pvaluesToSignficant(p_values, fdr):
'''return a list of bools for significance'''
return [int(x < fdr) for x in p_values]
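# Hedged alternative sketch: the same Benjamini-Hochberg adjustment and
# significance call as adjustPvalues/pvaluesToSignficant above, but without
# the R round-trip. statsmodels is an extra dependency assumed here for
# illustration only; the module itself goes through rpy2.
def _example_bh_adjust(p_values, fdr=0.1):
    from statsmodels.stats.multitest import multipletests

    reject, p_adj, _, _ = multipletests(p_values, alpha=fdr, method="fdr_bh")
    return list(p_adj), [int(r) for r in reject]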
class ExperimentalDesign(object):
"""Objects representing experimental designs.
This class takes an experimental design in tabular
form and exports several convenience functions and
attributes.
`filename_or_table` can be a filename of a tab-separated table
with the following columns.
track
the sample name
include
whether or not this sample should be included
in the design
groups
a label grouping several samples into a group
pair
for paired tests, a numeric identifier linking
samples that are paired.
    An example of an experimental design with two groups and paired
samples is below::
track include group pair
sample1 1 treatment 1
sample2 1 treatment 2
sample3 1 control 1
sample4 1 control 2
When providing `filename_or_table`, the `include` column is used
to directly filter the design to remove any non-included samples.
Additional columns will be added as factors to the design.
Alternatively, `filename_or_table` can be a pandas DataFrame with
sample names as row index and the appropriate columns.
Attributes
-----------
table : pandas DataFrame
dataframe object describing the design
groups : list
list of groups in the design
conditions : list
group for each sample
pairs : list
pair status for each sample
samples: list
sample names
factors: list
factors for each sample
has_replicates : bool
True if at least one group has multiple samples
has_pairs : bool
True if design is a paired design
"""
def __init__(self, filename_or_table):
# read in table in the constructor for ExpDesign
# e.g design = ExpDesign(pd.read_csv(...))
if isinstance(filename_or_table, str):
self.table = pandas.read_csv(filename_or_table, sep="\t",
index_col=0)
elif isinstance(filename_or_table, pandas.core.frame.DataFrame):
self.table = filename_or_table
else:
raise ValueError("Type needs to be string or pandas data frame."
"Type = %s", type(filename_or_table))
        assert self.table.shape[0] > 0, "design table is empty"
# parse the design table. Users probably expect this
# to happen once data is uploaded.
self._update()
def _update(self):
"""parse design file and fill class attributes.
Call this function whenever self.table changes.
"""
# remove all entries that should not be included
self.table = self.table[self.table["include"] != 0]
# define attributes
self.conditions = self.table['group'].tolist()
self.pairs = self.table['pair'].tolist()
# TS - use OrderedDict to retain order in unique
self.groups = (list(collections.OrderedDict.fromkeys(
self.conditions)))
self.samples = self.table.index.tolist()
# Test if replicates exist, i.e at least one group has multiple samples
# TS - does this need to be extended to check whether replicates exist
# for each group?
max_per_group = max([self.conditions.count(x) for x in self.groups])
self.has_replicates = max_per_group >= 2
# Test if pairs exist:
npairs = len(set(self.pairs))
has_pairs = npairs == 2
# ..if so, at least two samples are required per pair
if has_pairs:
min_per_pair = min([self.pairs.count(x) for x in set(self.pairs)])
self.has_pairs = min_per_pair >= 2
else:
self.has_pairs = False
# all columns except "include" may be considered as factors
self.factors = self.table.drop(["include"], axis=1)
# remove "pair" from factor if design does not include pairs
if not self.has_pairs:
self.factors.drop("pair", inplace=True, axis=1)
def validate(self, counts=None, model=None):
if counts is not None:
missing = set(self.samples).difference(set(counts.table.columns))
if len(missing) > 0:
raise ValueError(
"following samples in design table are missing"
" from counts table: %s" % ", ".join(missing))
if model is not None:
# check all model terms exist
model_terms = splitModel(model)
missing = set(model_terms).difference(
set(self.table.columns.tolist()))
if len(missing) > 0:
raise ValueError("following terms in the model are missing"
" from the design table: %s" %
", ".join(missing))
# check there are at least two values for each level
for term in model_terms:
                levels = set(self.table.loc[:, term])
if len(levels) < 2:
raise ValueError("term '%s' in the model has less "
"than two levels (%s) in the "
" design table" %
(term, ", ".join(levels)))
def restrict(self, counts):
''' return design with samples not in counts table removed '''
        self.table = self.table.loc[counts.table.columns, :]
def revalidate(self, counts, model=None):
''' re-validate, i.e post filtering of counts table '''
if len(set(self.samples).symmetric_difference(
set(counts.table.columns))) > 0:
self.restrict(counts)
self._update()
self.validate(counts, model)
else:
pass
def firstPairOnly(self):
'''restrict the design table to the first pair only.
If unpaired will retain whole design table
'''
if not self.pairs:
self.pairs = self.table['pair'].tolist()
        self.table = self.table.loc[self.table['pair'] == min(self.pairs), :]
def getSamplesInGroup(self, group):
"""return list of sample names belonging to group."""
if group not in self.groups:
raise KeyError("group '%s' not present")
return self.table[self.table["group"] == group].index.tolist()
def getGroupForSample(self, sample):
"""return group a sample belongs to"""
return self.table.loc[sample]["group"]
def getGroups2Samples(self):
"""return a dictionary mapping a group to samples within the group.
Returns
-------
dict
with groups as keys and list of samples within a group as values.
"""
groups_to_tracks = {}
for group in self.groups:
match_group = (self.table['group'] == group).tolist()
subset = self.table.iloc[match_group, ]
groups_to_tracks[group] = subset.index.tolist()
return groups_to_tracks
def mapGroupsSuffix(self, shuffle_suffix, keep_suffix):
        '''use the supplied suffixes to extract groups from the design
        table and return dictionaries mapping each group to the tracks
        to keep and to the tracks which should be shuffled
'''
groups_to_keep_tracks = {}
groups_to_spike_tracks = {}
keep_suffix = keep_suffix.split(",")
for group in self.groups:
match_group = (self.table['group'] == group).tolist()
tmp_design = self.table.iloc[match_group, ]
groups_to_spike_tracks[group] = [
x + shuffle_suffix for x in tmp_design.index.tolist()]
groups_to_keep_tracks[group] = copy.copy(
groups_to_spike_tracks[group])
groups_to_keep_tracks[group].extend(
[x + y for x in tmp_design.index.tolist() for y in keep_suffix])
return groups_to_keep_tracks, groups_to_spike_tracks
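# Usage sketch for ExperimentalDesign, mirroring the table layout shown in its
# docstring (the sample names and groups are made up for illustration).
def _example_design():
    import pandas
    table = pandas.DataFrame(
        {"include": [1, 1, 1, 1],
         "group": ["treatment", "treatment", "control", "control"],
         "pair": [1, 2, 1, 2]},
        index=["sample1", "sample2", "sample3", "sample4"])
    design = ExperimentalDesign(table)
    return design.groups, design.getSamplesInGroup("treatment"), design.has_pairs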
class DEExperiment(object):
''' base clase for DE experiments '''
def __init__(self):
pass
def run(self):
''' Custom DE functions
return a DEResult object'''
class DEResult(object):
''' base class for DE result '''
def __init__(self, testTable=None):
self.table = testTable
def getResults(self):
''' post-process results into generic output
columns are:
- contrast
- treatment_name
- control_name
- test_id
- control_mean
- treatment_mean
- control_std
- treatment_std
- p_value
- p_value_adj
- significant
- l2fold
- transformed_l2fold
- fold
- status
'''
def calculateIHW(self, alpha=0.1):
''' Use the Independent Hypothesis Weighting method from
IGNATIADIS et al (2016) to perform weighted FDR'''
if not ('control_mean' in self.table.columns and
'treatment_mean' in self.table.columns and
'p_value' in self.table.columns):
E.error("IHW requires control_mean, treatment_mean and p_value "
"columns, have you first run the getResults method?")
runIHW = r('''function(df){
library(IHW)
mean_expression = (df$control_mean + df$treatment_mean)/2
ihw_res = ihw(df$p_value ~ mean_expression, alpha = %(alpha)s)
df$p_value_adj = adj_pvalues(ihw_res)
return(df)
}''' % locals())
self.table = pandas2ri.ri2py(runIHW(pandas2ri.py2ri(self.table)))
self.table["significant"] = pvaluesToSignficant(
self.table["p_value_adj"], alpha)
def summariseDEResults(self):
''' summarise DE results. Counts instances of possible outcomes'''
# TS: the summarising is now split by the comparison being made and a
# dict returned with keys=comparisons, value=E.Counter per comparison
self.Summary = {}
control_names = set(self.table['control_name'])
treatment_names = set(self.table['treatment_name'])
for control, treatment in itertools.product(control_names,
treatment_names):
tmp_table = self.table[self.table['control_name'] == control]
tmp_table = tmp_table[tmp_table['treatment_name'] == treatment]
tmp_table.reset_index(inplace=True)
# check control, treatment combination exists
n_rows = tmp_table.shape[0]
if n_rows > 0:
if control != treatment:
label = control + "_" + treatment
else:
label = control
label = re.sub(":", "_int_", label)
counts = E.Counter()
counts.signficant = sum(tmp_table['significant'])
counts.insignficant = (len(tmp_table['significant']) -
counts.signficant)
counts.all_over = sum([x > 0 for x in tmp_table['l2fold']])
counts.all_under = sum([x < 0 for x in tmp_table['l2fold']])
counts.signficant_over = sum(
[tmp_table['significant'][x] == 1 and
tmp_table['l2fold'][x] > 0 for x in range(0, n_rows)])
counts.signficant_under = sum(
[tmp_table['significant'][x] == 1 and
tmp_table['l2fold'][x] < 0 for x in range(0, n_rows)])
self.Summary[label] = counts
def plotMA(self, contrast=None, outfile_prefix=None,
point_alpha=1, point_size=1, R=None):
''' base function for making a MA plot '''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
l_txt = element_text(size=20)
tmp_df = tmp_df[tmp_df$contrast=="%(contrast)s",]
tmp_df = tmp_df[order(-tmp_df$p_value_adj),]
p = ggplot(tmp_df, aes(log((control_mean+treatment_mean)/2,2),
transformed_l2fold,
colour=as.factor(significant))) +
geom_point(size=%(point_size)f, alpha=%(point_alpha)f) +
xlab("log2 mean expression") + ylab("log2 fold change")+
ggtitle("%(contrast)s") +
scale_colour_manual(name="Significant", values=c("black", "red")) +
guides(colour = guide_legend(override.aes = list(size=10)))+
theme_bw() +
theme(axis.text.x = l_txt, axis.text.y = l_txt,
axis.title.x = l_txt, axis.title.y = l_txt,
legend.title = l_txt, legend.text = l_txt,
title=l_txt, legend.key.size=unit(1, "cm"),
aspect.ratio=1)
suppressMessages(
ggsave(file="%(outfile_prefix)s_%(contrast)s_MA_plot.png",
width=10, height=10))''' % locals())
def plotVolcano(self, contrast=None, outfile_prefix=None, R=None):
''' base function for Volcano plotting'''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
l_txt = element_text(size=20)
tmp_df = tmp_df[tmp_df$contrast=="%(contrast)s",]
p = ggplot(tmp_df, aes(transformed_l2fold, -log(p_value,10),
colour=as.factor(significant))) +
geom_point() + xlab("log2 fold change") + ylab("p-value (-log10)") +
ggtitle("%(contrast)s") +
scale_colour_manual(name="Significant", values=c("black", "#619CFF")) +
guides(colour = guide_legend(override.aes = list(size=10))) +
theme_bw() +
theme(axis.text.x = l_txt, axis.text.y = l_txt,
axis.title.x = l_txt, axis.title.y = l_txt,
legend.title = l_txt, legend.text = l_txt,
title=l_txt, legend.key.size=unit(1, "cm"))
suppressMessages(
ggsave(file="%(outfile_prefix)s_%(contrast)s_volcano_plot.png",
width=10, height=10))''' % locals())
def plotPvalueHist(self, contrast=None, outfile_prefix=None, R=None):
''' base function for Volcano plotting'''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
l_txt = element_text(size=20)
tmp_df = tmp_df[tmp_df$contrast=="%(contrast)s",]
p = ggplot(tmp_df, aes(p_value)) +
geom_histogram(fill="dodgerblue4") +
xlab("p-value") + ylab("count") +
ggtitle("p-value histogram - %(contrast)s") +
theme_bw() +
theme(axis.text.x = l_txt, axis.text.y = l_txt,
axis.title.x = l_txt, axis.title.y = l_txt,
title=l_txt)
suppressMessages(
ggsave(file="%(outfile_prefix)s_%(contrast)s_p_value_histogram.png",
width=10, height=10))''' % locals())
def plotPvalueQQ(self, contrast=None, outfile_prefix=None, R=None):
''' base function for Volcano plotting'''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
log_obs_pvalues = sort(-log10(tmp_df[['p_value']]))
uni_pvalues=runif(length(log_obs_pvalues))
log_uni_pvalues= -log10(uni_pvalues)
log_uni_pvalues = sort(log_uni_pvalues)
png(file="%(outfile_prefix)s_%(contrast)s_p_value_qq_plot.png")
plot(log_uni_pvalues,log_obs_pvalues,
xlab=expression(Theoretical~~-log[10](italic(p))),
ylab=expression(Observed~~-log[10](italic(p))),
main="P-value QQ-plot",
pch=20)
abline(0,1)''' % locals())
class DEExperiment_TTest(DEExperiment):
'''DECaller object to run TTest on counts data'''
# TS: to do: deal with genes/regions with zero counts
def run(self, counts, design, normalise=True,
normalise_method="deseq-size-factors"):
# TS: normalisation performed here rather than earlier as
# the method of normalisation is dependent upon the DE test
if normalise is True:
counts.normalise(method=normalise_method)
df_dict = collections.defaultdict(list)
for combination in itertools.combinations(design.groups, 2):
control, treatment = combination
n_rows = counts.table.shape[0]
df_dict["control_name"].extend((control,)*n_rows)
df_dict["treatment_name"].extend((treatment,)*n_rows)
df_dict["test_id"].extend(counts.table.index.tolist())
# set all status values to "OK"
df_dict["status"].extend(("OK",)*n_rows)
# subset counts table for each combination
c_keep = [x == control for
x in design.conditions]
control_counts = counts.table.iloc[:, c_keep]
t_keep = [x == treatment for
x in design.conditions]
treatment_counts = counts.table.iloc[:, t_keep]
c_mean = control_counts.mean(axis=1)
df_dict["control_mean"].extend(c_mean)
df_dict["control_std"].extend(control_counts.std(axis=1))
t_mean = treatment_counts.mean(axis=1)
df_dict["treatment_mean"].extend(t_mean)
df_dict["treatment_std"].extend(treatment_counts.std(axis=1))
t, prob = ttest_ind(control_counts, treatment_counts, axis=1)
df_dict["p_value"].extend(prob)
result = DEResult_TTest(testTable=pandas.DataFrame(df_dict))
result.table.set_index("test_id", inplace=True)
return result
class DEResult_TTest(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output '''
# TS - what about zero values?!
self.table["fold"] = (
self.table["treatment_mean"] / self.table["control_mean"])
self.table["p_value_adj"] = adjustPvalues(self.table["p_value"])
self.table["significant"] = pvaluesToSignficant(
self.table["p_value_adj"], fdr)
self.table["l2fold"] = list(numpy.log2(self.table["fold"]))
# note: the transformed log2 fold change is not transformed for TTest
self.table["transformed_l2fold"] = self.table["l2fold"]
self.table["contrast"] = "_vs_".join((self.table['control_name'],
self.table['treatment_name']))
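# Stand-alone sketch of the per-gene test performed by DEExperiment_TTest:
# ttest_ind with axis=1 compares each row (gene) between the two groups and
# the p-values are then BH-adjusted via the helpers above (which require the
# same rpy2/R setup as the rest of this module). The Poisson count matrices
# are made-up illustration data.
def _example_rowwise_ttest(fdr=0.1):
    import numpy as np
    from scipy.stats import ttest_ind

    rng = np.random.RandomState(0)
    control = rng.poisson(lam=20, size=(100, 4))    # 100 genes x 4 samples
    treatment = rng.poisson(lam=25, size=(100, 4))
    _, p_values = ttest_ind(control, treatment, axis=1)
    p_adj = adjustPvalues(list(p_values))
    return pvaluesToSignficant(p_adj, fdr)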
class DEExperiment_edgeR(DEExperiment):
'''DEExperiment object to run edgeR on counts data
See page 13 of the EdgeR user guide::
2. Simply pick a reasonable dispersion value, based on your
experience with similar data, and use that. Although
subjective, this is still more defensible than assuming Poisson
variation. Typical values are dispersion=0.4 for human data,
dispersion=0.1 for data on genetically identical model
organisms or dispersion=0.01 for technical replicates.
'''
def run(self,
counts,
design,
model=None,
contrast=None,
outfile_prefix=None,
ref_group=None,
fdr=0.1,
dispersion=None):
if not design.has_replicates and dispersion is None:
raise ValueError("no replicates and no dispersion")
# create r objects
r_counts = pandas2ri.py2ri(counts.table)
r_groups = ro.StrVector(design.conditions)
r_pairs = ro.StrVector(design.pairs)
r_has_pairs = ro.BoolVector([design.has_pairs])
r_has_replicates = ro.BoolVector([design.has_replicates])
if model is not None:
r_factors_df = pandas2ri.py2ri(design.factors)
else:
r_factors_df = ro.default_py2ri(False)
E.info('running edgeR: groups=%s, replicates=%s, pairs=%s, '
'additional_factors:%s' %
(design.groups, design.has_replicates, design.has_pairs,
design.factors))
levels = set(design.table[contrast])
if len(levels) > 2:
E.warn(
"There are more than 2 levels for the "
"contrast specified" "(%s:%s). The log2fold changes in the "
"results table and MA plots will be for the first two "
"levels in the contrast. The p-value will be the p-value "
"for the overall significance of the contrast. Hence, some "
"genes will have a signficant p-value but 0-fold change "
"between the first two levels" % (contrast, levels))
# build DGEList object
buildDGEList = r('''
suppressMessages(library('edgeR'))
function(counts){
countsTable = DGEList(counts)
countsTable = calcNormFactors(countsTable)
return(countsTable)}''' % locals())
r_countsTable = buildDGEList(r_counts)
# build design matrix
buildDesign = r('''
function(factors_df){
for (level in colnames(factors_df)){
factors_df[[level]] <- factor(factors_df[[level]])
}
factors_df$%(contrast)s <- relevel(
factors_df$%(contrast)s, ref="%(ref_group)s")
design <- model.matrix(%(model)s, data=factors_df)
return(design)}''' % locals())
r_design = buildDesign(r_factors_df)
# fit model
fitModel = r('''
function(countsTable, design, has_replicates){
if (has_replicates[1] == TRUE) {
# estimate common dispersion
countsTable = estimateGLMCommonDisp( countsTable, design )
# estimate trended dispersion
countsTable <- estimateGLMTrendedDisp( countsTable, design)
# estimate tagwise dispersion
countsTable = estimateGLMTagwiseDisp( countsTable, design )
# fitting model to each tag
fit = glmFit( countsTable, design ) }
else {
# fitting model to each tag
fit = glmFit(countsTable, design, dispersion=%(dispersion)s) }
return(fit)}''' % locals())
r_fit = fitModel(r_countsTable, r_design, r_has_replicates)
E.info("Conducting likelihood ratio tests")
lrtTest = r('''
function(fit, design, factors_df, countsTable){
suppressMessages(library(reshape2))
lrt = glmLRT(fit)
lrt_table = as.data.frame(lrt$table)
lrt_table$contrast <- "%(contrast)s"
for (level in colnames(factors_df)){
factors_df[[level]] <- factor(factors_df[[level]])
}
factors_df$%(contrast)s <- relevel(
factors_df$%(contrast)s, ref="%(ref_group)s")
contrast_levels = as.vector(levels(factor(factors_df[["%(contrast)s"]])))
lrt_table$control_name <- contrast_levels[1]
lrt_table$treatment_name <- contrast_levels[2]
dt <- decideTestsDGE(lrt, adjust.method="BH", p.value=%(fdr)s)
isDE <- as.logical(dt)
DEnames <- rownames(fit)[isDE]
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotSmear(lrt, de.tags=DEnames, cex=0.35, main="%(contrast)s")
abline(h=c(-1,1), col="blue")
dev.off()
return(lrt_table)}''' % locals())
r_lrt_table = lrtTest(r_fit, r_design, r_factors_df, r_countsTable)
result = DEResult_edgeR(testTable=pandas2ri.ri2py(r_lrt_table))
return result
class DEResult_edgeR(DEResult):
def getResults(self, fdr, DEtype="GLM"):
''' post-process test results table into generic results output '''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = self.table['treatment_name']
df_dict["control_name"] = self.table['control_name']
df_dict["contrast"] = self.table['contrast']
df_dict["test_id"] = self.table.index
df_dict["control_mean"] = self.table['logCPM']
df_dict["treatment_mean"] = self.table['logCPM']
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['PValue']
df_dict["p_value_adj"] = adjustPvalues(self.table['PValue'])
df_dict["significant"] = pvaluesToSignficant(
df_dict["p_value_adj"], fdr)
df_dict["l2fold"] = (self.table['logFC'])
# TS: the transformed log2 fold change is not transformed!
df_dict["transformed_l2fold"] = df_dict["l2fold"]
# TS: check what happens when no fold change is available
# TS: may need an if/else in list comprehension. Raise E.warn too?
df_dict["fold"] = [math.pow(2, float(x)) for
x in self.table['logFC']]
# set all status values to "OK"
# TS: again, may need an if/else to check...
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
class DEExperiment_DESeq2(DEExperiment):
'''DEExperiment object to run DESeq2 on counts data'''
def run(self,
counts,
design,
fdr=0.1,
fit_type="parametric",
model=None,
outfile_prefix=None,
ref_group=None,
contrast=None,
DEtest="Wald",
R=None):
if not R:
R = rpy2.robjects.r
pandas2ri.activate()
# R will replace any "-" with "." in rownames.
# Here, we make sure the design and counts samples are the same
design.table.index = [x.replace("-", ".") for x in design.table.index]
design.factors.index = [x.replace("-", ".") for x in design.factors.index]
counts.table.columns = [x.replace("-", ".") for x in counts.table.columns]
# create r objects
ro.globalenv['counts'] = pandas2ri.py2ri(counts.table)
ro.globalenv['design'] = pandas2ri.py2ri(design.table)
ro.globalenv['factors_df'] = pandas2ri.py2ri(design.factors)
        model_terms = [x for x in re.split(r"[\+~ ]+", model)[1:]
                       if x != "0"]
E.info('running DESeq2: groups=%s, replicates=%s, pairs=%s, '
'DE test: %s, additional_factors:%s, ' %
(design.groups, design.has_replicates, design.has_pairs,
DEtest, design.factors))
# load DESeq
R('''suppressMessages(library('DESeq2'))''')
# build DESeq2 Datasets (dds)
assert contrast, ("must supply a contrast for wald or LRT "
"(for LRT, contrast is used to derive reduced model")
if DEtest == "wald":
assert ref_group, "Must supply a ref_group to perform Wald test"
if ref_group:
R('''
for(column in colnames(factors_df)){
factors_df[[column]] = factor(factors_df[[column]])
}
full_model <- formula("%(model)s")
factors_df$%(contrast)s <- relevel(
factors_df$%(contrast)s, ref="%(ref_group)s")
dds <- suppressMessages(DESeqDataSetFromMatrix(
countData= counts,
colData = factors_df,
design = full_model))
''' % locals())
else:
R('''
for(column in colnames(factors_df)){
factors_df[[column]] = factor(factors_df[[column]])
}
full_model <- formula("%(model)s")
dds <- suppressMessages(DESeqDataSetFromMatrix(
countData= counts,
colData = factors_df,
design = full_model))
''' % locals())
if DEtest == "wald":
levels = set(design.table[contrast])
if len(levels) > 2:
                E.warn('''Using Wald test for a factor with more than 2
                levels (%s:%s); consider using the LRT instead''' % (contrast, levels))
contrast = model_terms[-1]
contrast_levels = set(design.factors[contrast])
# performDifferentialTesting
R('''
dds = suppressMessages(
DESeq(dds, test="Wald", fitType="%(fit_type)s"))
contrast_levels = as.vector(levels(dds@colData$%(contrast)s))
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(dds)
dev.off()
res = suppressMessages(results(dds))
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotMA(res, alpha=%(fdr)s)
dev.off()
res = as.data.frame(res)
c = counts(dds, normalized = TRUE)
res$contrast = "%(contrast)s"
contrast_levels = levels(dds@colData$%(contrast)s)
res$control = contrast_levels[1]
res$treatment = contrast_levels[2]
res['test_id'] = rownames(res)
''' % locals())
results = pandas2ri.ri2py(ro.globalenv['res'])
# liklihood ratio test
# Note that if there are more than 3 levels for the contrast,
# the results table will include a log2-fold change from the
# first two levels only, however, MA plots will be generated
# for each combination of levels
elif DEtest == "lrt":
levels = set(design.table[contrast])
if len(levels) > 2:
                E.warn('''There are more than 2 levels for the
                contrast specified (%s:%s). The log2fold changes in the
                results table and MA plots will be for the first two
                levels in the contrast. The p-value will be the p-value
                for the overall significance of the contrast. Hence, some
                genes may have a significant p-value but ~0-fold change
                between the first two levels''' % (contrast, levels))
n = 0
reduced_model = [x for x in model_terms if x != contrast]
if len(reduced_model) > 0:
reduced_model = "~" + "+".join(reduced_model)
else:
reduced_model = "~1"
R('''
ddsLRT <- suppressMessages(
DESeq(dds, test="LRT", reduced=formula("%(reduced_model)s"),
betaPrior=TRUE, fitType="%(fit_type)s"))
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(ddsLRT)
dev.off()
contrast_levels = as.vector(levels(dds@colData$%(contrast)s))
res = suppressMessages(results(ddsLRT, addMLE=TRUE,
contrast=c("%(contrast)s",
contrast_levels[2], contrast_levels[1])))
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotMA(res, alpha=%(fdr)s)
dev.off()
res = as.data.frame(res)
res$contrast = "%(contrast)s"
if(length(contrast_levels)==2) {
res$control = contrast_levels[1]
res$treatment = contrast_levels[2]
} else {
res$control = "%(contrast)s"
res$treatment = "%(contrast)s"
}
res['test_id'] = rownames(res)
''' % locals())
results = pandas2ri.ri2py(ro.globalenv['res'])
else:
raise ValueError("DEtest must be 'wald' or 'lrt'")
final_result = DEResult_DESeq2(testTable=results)
return final_result
class DEResult_DESeq2(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output '''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = self.table['treatment']
df_dict["control_name"] = self.table['control']
df_dict["test_id"] = self.table['test_id']
df_dict["contrast"] = self.table['contrast']
df_dict["control_mean"] = self.table['baseMean']
df_dict["treatment_mean"] = self.table['baseMean']
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['pvalue']
df_dict["p_value_adj"] = adjustPvalues(self.table['pvalue'])
df_dict["significant"] = pvaluesToSignficant(
df_dict["p_value_adj"], fdr)
df_dict["l2fold"] = self.table['log2FoldChange']
# Transformed l2fold is the shrunken values
df_dict["transformed_l2fold"] = self.table['log2FoldChange']
# TS: check what happens when no fold change is available
# TS: may need an if/else in list comprehension. Raise E.warn too?
df_dict["fold"] = [math.pow(2, float(x)) for
x in df_dict["l2fold"]]
# set all status values to "OK"
# TS: again, may need an if/else to check...
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
# causes errors if multiple instance of same test_id exist, for example
# if multiple constrasts have been tested
# self.table.set_index("test_id", inplace=True)
class DEExperiment_DEXSeq(DEExperiment):
'''DEExperiment object to run DEXSeq on counts data'''
def run(self,
design,
base_dir,
model=None,
flattenedfile=None,
outfile_prefix=None,
ref_group=None,
contrast=None,
fdr=0.1):
pandas2ri.activate()
# create r objects
E.info('running DEXSeq: groups=%s, pairs=%s, '
'has_replicates=%s, has_pairs=%s' %
(design.groups, design.pairs, design.has_replicates,
design.has_pairs))
# load DEXSeq
R('''suppressMessages(library('DEXSeq'))''')
sampleTable = design.table
allfiles = [file for file in os.listdir(base_dir)]
countfiles = []
for item in list(design.table.index):
countfiles += [base_dir+"/"+x for x in allfiles if item in x]
E.info("Processing Samples. Sample table:")
E.info("%s" % sampleTable)
buildCountDataSet = R('''
function(countFiles, gff, sampleTable, model){
full_model <- formula("%(model)s")
dxd <- suppressMessages(DEXSeqDataSetFromHTSeq(
countFiles,
sampleData=sampleTable,
flattenedfile=gff,
design=full_model))
contrast_levels = as.vector(levels(dxd@colData$%(contrast)s))
dxd = estimateSizeFactors(dxd)
dxd = estimateDispersions(dxd)
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(dxd)
dev.off()
dxd = testForDEU(dxd)
dxd = estimateExonFoldChanges( dxd, fitExpToVar="%(contrast)s")
result = DEXSeqResults(dxd)
result = as.data.frame(result)
result$contrast = "%(contrast)s"
result$log2FoldChange = result$log2fold
if(length(contrast_levels)==2) {
result$control = contrast_levels[1]
result$treatment = contrast_levels[2]
} else {
result$control = "%(contrast)s"
result$treatment = "%(contrast)s"
}
return(result)
}''' % locals())
result = pandas2ri.ri2py(
buildCountDataSet(countfiles, flattenedfile, sampleTable, model))
result['test_id'] = result.index
result['contrast'] = contrast
final_result = DEResult_DEXSeq(result)
return final_result
class DEResult_DEXSeq(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output '''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = self.table['treatment']
df_dict["control_name"] = self.table['control']
df_dict["test_id"] = self.table['test_id']
df_dict["contrast"] = self.table['contrast']
df_dict["control_mean"] = self.table['exonBaseMean']
df_dict["treatment_mean"] = self.table['exonBaseMean']
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['pvalue']
df_dict["p_value_adj"] = adjustPvalues(self.table['pvalue'])
df_dict["significant"] = pvaluesToSignficant(
df_dict["p_value_adj"], fdr)
df_dict["l2fold"] = ("NA",)*n_rows
# Transformed l2fold is the shrunken values
df_dict["transformed_l2fold"] = self.table['log2FoldChange']
df_dict["fold"] = ("NA",)*n_rows
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
# causes errors if multiple instances of the same test_id exist, for example
# if multiple contrasts have been tested
# self.table.set_index("test_id", inplace=True)
def plotMAplot(self, design, outfile_prefix):
# need to implement DEXSeq specific MA plot
raise ValueError("MA plotting is not yet implemented for DEXSeq")
class DEExperiment_Sleuth(DEExperiment):
'''DEExperiment object to run sleuth on kallisto bootstrap files
Unlike the other DEExperiment instances, this does not operate on
a Counts.Counts object but instead reads the bootstrap hd5 files
from kallisto into memory in R and then performs the differential
testing
The run method expects all kallisto abundance.h5 files to be under
a single directory with a subdirectory for each sample
Note: LRT does not generate fold change estimates (see DEResult_Sleuth)
use dummy_run = True if you don't want to perform differential
testing but want the counts/tpm outfiles
'''
def run(self,
design,
base_dir,
model=None,
contrast=None,
outfile_prefix=None,
counts=None,
tpm=None,
fdr=0.1,
DE_test="wald",
reduced_model=None,
dummy_run=False,
genewise=False,
gene_biomart=None,
ref_group=None):
if DE_test == "lrt":
E.info("Note: LRT will not generate fold changes")
assert reduced_model is not None, ("need to provide a reduced "
"model to use LRT")
# Design table needs a "sample" column
design.table['sample'] = design.table.index
r_design_df = pandas2ri.py2ri(design.table)
E.info('running sleuth: groups=%s, pairs=%s, '
'has_replicates=%s, has_pairs=%s' %
(design.groups, design.pairs, design.has_replicates,
design.has_pairs))
# load sleuth
r('''suppressMessages(library('sleuth'))''')
# make variates string to ensure all model terms are in the
# design dataframe for sleuth
model_terms = [x for x in re.split(r"[\+~ ]+", model)[1:]
if x != "0"]
variates = "c(%s)" % ",".join(model_terms)
# need to code in option to not use a reference group (e.g for LRT)
if genewise:
assert gene_biomart, ("for genewise analysis, "
"must provide a 'gene_biomart'")
createSleuthObject = r('''
function(design_df){
library(biomaRt)
sample_id = design_df$sample
kal_dirs <- sapply(sample_id,
function(id) file.path('%(base_dir)s', id))
design_df <- dplyr::select(design_df, sample = sample,
%(variates)s)
design_df <- dplyr::mutate(design_df, path = kal_dirs)
%(contrast)s <- factor(design_df$%(contrast)s)
%(contrast)s <- relevel(%(contrast)s,ref='%(ref_group)s')
md <- model.matrix(%(model)s, design_df)
colnames(md)[grep("%(contrast)s", colnames(md))] <- '%(contrast)s%(ref_group)s'
mart <- biomaRt::useMart(biomart = "ENSEMBL_MART_ENSEMBL",
#dataset = "hsapiens_gene_ensembl",
dataset = "%(gene_biomart)s",
host="www.ensembl.org")
t2g <- biomaRt::getBM(
attributes = c("ensembl_transcript_id","ensembl_gene_id",
"external_gene_name"), mart = mart)
t2g <- dplyr::rename(t2g, target_id = ensembl_transcript_id,
ens_gene = ensembl_gene_id,
ext_gene = external_gene_name)
so <- sleuth_prep(design_df, md,
target_mapping = t2g, aggregation_column = 'ens_gene')
so <- suppressMessages(sleuth_fit(so))
return(so)
}''' % locals())
else:
createSleuthObject = r('''
function(design_df){
sample_id = design_df$sample
kal_dirs <- sapply(sample_id,
function(id) file.path('%(base_dir)s', id))
design_df <- dplyr::select(design_df, sample = sample,
%(variates)s)
design_df <- dplyr::mutate(design_df, path = kal_dirs)
%(contrast)s <- factor(design_df$%(contrast)s)
%(contrast)s <- relevel(%(contrast)s,ref='%(ref_group)s')
md <- model.matrix(%(model)s, design_df)
colnames(md)[grep("%(contrast)s", colnames(md))] <- '%(contrast)s%(ref_group)s'
so <- sleuth_prep(design_df, md)
so <- sleuth_fit(so)
return(so)
}''' % locals())
so = createSleuthObject(r_design_df)
# write out counts and tpm tables if required
if counts:
makeCountsTable = r('''
function(so){
library('reshape')
df = cast(so$obs_raw, target_id~sample, value = "est_counts")
colnames(df)[1] <- "transcript_id"
write.table(df, "%(counts)s", sep="\t", row.names=F, quote=F)
}''' % locals())
makeCountsTable(so)
if tpm:
makeTPMTable = r('''
function(so){
library('reshape')
df = cast(so$obs_raw, target_id~sample, value = "tpm")
colnames(df)[1] <- "transcript_id"
write.table(df, "%(tpm)s", sep="\t", row.names=F, quote=F)
}''' % locals())
makeTPMTable(so)
if dummy_run:
return None
if DE_test == "lrt":
differentialTesting = r('''
function(so){
so <- suppressMessages(sleuth_fit(so, formula = %(reduced_model)s,
fit_name = "reduced"))
so <- suppressMessages(sleuth_lrt(so, "reduced", "full"))
results_table <- sleuth_results(so, test = 'reduced:full',
test_type = 'lrt')
return(results_table)
} ''' % locals())
final_result = DEResult_Sleuth(pandas2ri.ri2py(
differentialTesting(so)))
elif DE_test == "wald":
differentialTesting = r('''
function(so){
so <- sleuth_wt(so, which_beta = '%(contrast)s%(ref_group)s')
p_ma = plot_ma(so, '%(contrast)s%(ref_group)s')
ggsave("%(outfile_prefix)s_%(contrast)s_sleuth_ma.png",
width=15, height=15, units="cm")
p_vars = plot_vars(so, '%(contrast)s%(ref_group)s')
ggsave("%(outfile_prefix)s_%(contrast)s_sleuth_vars.png",
width=15, height=15, units="cm")
p_mean_var = plot_mean_var(so)
ggsave("%(outfile_prefix)s_%(contrast)s_sleuth_mean_var.png",
width=15, height=15, units="cm")
results_table <- sleuth_results(so, test = '%(contrast)s%(ref_group)s')
return(results_table)
} ''' % locals())
results = pandas2ri.ri2py(differentialTesting(so))
results['contrast'] = contrast
else:
raise ValueError("unknown DE test type, use 'wald' or 'lrt'")
final_result = DEResult_Sleuth(results)
return final_result
class DEResult_Sleuth(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output
expression and fold changes from Sleuth are natural logs'''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = ("NA",)*n_rows
df_dict["control_name"] = ("NA",)*n_rows
df_dict["test_id"] = self.table['target_id']
df_dict["contrast"] = self.table['contrast']
df_dict["control_mean"] = [math.exp(float(x)) for
x in self.table['mean_obs']]
df_dict["treatment_mean"] = df_dict["control_mean"]
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['pval']
df_dict["p_value_adj"] = adjustPvalues(self.table['pval'])
df_dict["significant"] = pvaluesToSignficant(df_dict["p_value_adj"],
fdr)
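# Worked example for the two conversions below (illustrative numbers only):
# sleuth reports the effect size 'b' on the natural-log scale, so
# b = log(2) ~= 0.693 gives fold = exp(b) = 2.0 and l2fold = log2(2.0) = 1.0.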
df_dict["fold"] = [math.exp(float(x)) for
x in self.table['b']]
df_dict["l2fold"] = [math.log(float(x), 2) for x in df_dict['fold']]
df_dict["transformed_l2fold"] = df_dict["l2fold"]
# set all status values to "OK"
# TS: again, may need an if/else to check...
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
# causes errors if multiple instances of the same test_id exist, for example
# if multiple contrasts have been tested
# self.table.set_index("test_id", inplace=True)
###############################################################################
def buildProbeset2Gene(infile,
outfile,
database="hgu133plus2.db",
mapping="hgu133plus2ENSEMBL"):
'''build map relating a probeset to an ENSEMBL gene_id'''
R.library(database)
# map is a Bimap object
m = r(mapping)
result = R.toTable(m)
outf = open(outfile, "w")
outf.write("probe_id\tgene_id\n")
for probeset_id, gene_id in zip(result["probe_id"],
result["ensembl_id"]):
outf.write("%s\t%s\n" % (probeset_id, gene_id))
outf.close()
E.info("written %i mappings to %s: probes=%i, genes=%i" %
(len(result),
outfile,
len(set(result["probe_id"])),
len(set(result["ensembl_id"]))))
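# Illustrative sketch only (never called in this module): writing the
# probe-to-gene map with the default hgu133plus2 annotation. The output
# file name is hypothetical; note that the infile argument is not read by
# buildProbeset2Gene() itself.
def _example_build_probeset2gene():
    buildProbeset2Gene(infile=None, outfile="probe2gene.tsv")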
GeneExpressionResult = collections.namedtuple(
"GeneExpressionResult",
"test_id treatment_name treatment_mean treatment_std "
"control_name control_mean control_std "
"pvalue qvalue l2fold fold transformed_l2fold "
"significant status")
def writeExpressionResults(outfile, result):
'''output expression results table.'''
if outfile == sys.stdout:
outf = outfile
else:
outf = iotools.open_file(outfile, "w")
outf.write("%s\n" % "\t".join(GeneExpressionResult._fields))
for x in sorted(result):
outf.write("%s\n" % "\t".join(map(str, x)))
if outf != sys.stdout:
outf.close()
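# Illustrative sketch only (never called in this module): assembling a single
# GeneExpressionResult row by hand and writing it with
# writeExpressionResults(). All values are made up for demonstration.
def _example_write_expression_results():
    example = GeneExpressionResult._make((
        "gene_1",       # test_id
        "treatment",    # treatment_name
        12.3,           # treatment_mean
        0.0,            # treatment_std
        "control",      # control_name
        6.1,            # control_mean
        0.0,            # control_std
        0.001,          # pvalue
        0.01,           # qvalue
        1.0,            # l2fold
        2.0,            # fold
        1.0,            # transformed_l2fold
        1,              # significant
        "OK"))          # status
    writeExpressionResults(sys.stdout, [example])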
class WelchsTTest(object):
'''base class for computing expression differences.
'''
def __call__(self,
probesets,
treatments,
controls):
assert len(probesets) == len(treatments[0])
assert len(probesets) == len(controls[0])
nskipped = 0
results = []
for probeset, treatment, control in zip(
probesets, zip(*treatments), zip(*controls)):
nval1, nval2 = len(treatment), len(control)
mean1, mean2 = numpy.mean(treatment), numpy.mean(control)
stddev1, stddev2 = numpy.std(treatment), numpy.std(control)
try:
s = Stats.doWelchsTTest(nval1, mean1, stddev1,
nval2, mean2, stddev2,
alpha=0.05)
except ValueError:
E.warn(
"expressionDifferences: standard deviations are 0 for "
"probeset %s - skipped" % probeset)
nskipped += 1
continue
s.mProbeset = probeset
results.append(s)
qvalues = Stats.doFDR([x.mPValue for x in results]).mQValues
for s, qvalue in zip(results, qvalues):
s.mQValue = qvalue
return results, nskipped
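# Illustrative sketch only (never called in this module): expected input
# shapes for WelchsTTest. Each inner list holds one value per probeset for a
# single replicate array; the values are made up, and Stats.doWelchsTTest is
# assumed to be importable as elsewhere in this module.
def _example_welchs_ttest():
    probesets = ["probe_1", "probe_2"]
    treatments = [[1.0, 2.0],     # replicate 1, one value per probeset
                  [1.4, 2.6]]     # replicate 2
    controls = [[0.9, 2.1],
                [1.1, 1.9]]
    results, nskipped = WelchsTTest()(probesets, treatments, controls)
    return results, nskipped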
class SAMR(object):
'''SAM analysis of microarray data.
Use the Two-Class Unpaired Case Assuming Unequal Variances.
This uses the samr library.
Significant genes are either called at *fdr* or the
top *ngenes* are returned.
*treatments* and *control* are arrays of
arrays of expression values.
See
https://stat.ethz.ch/pipermail/bioconductor/2008-July/023251.html
for an explanation of the differences between siggens SAM
and Excel SAM. This version is parameterised to reproduce Excel SAM
by setting::
var.equal = TRUE
med = TRUE
.. note::
SAM requires log2 scaled expression levels.
'''
def __call__(self, probesets,
treatments,
controls,
pattern=None,
fdr=0.10,
ngenes=None,
npermutations=1000,
ndelta=10,
method="ttest"):
if ngenes and fdr:
raise ValueError("either supply ngenes or fdr, but not both.")
R.library("samr")
m = numpy.matrix(treatments + controls)
m = numpy.transpose(m)
labels = numpy.array([1] * len(treatments) + [2] * len(controls))
R.assign("x", numpy.array(m))
R.assign("y", labels)
R.assign("probesets", probesets)
data = r(
'''data=list( x=x, y=y, geneid=1:length(probesets), genenames=probesets, logged2=TRUE)''')
result = r(
'''samr.obj<-samr(data, resp.type="Two class unpaired", nperms=100)''')
r('''plot(samr.obj, delta=.4)''')
class SAM(object):
'''SAM analysis of microarray data.
Use the Two-Class Unpaired Case Assuming Unequal Variances.
This uses the siggenes library. Note that there is also
an rsam package at:
http://rss.acs.unt.edu/Rdoc/library/samr/html/samr.html
Significant genes are either called at *fdr* or the
top *ngenes* are returned.
*treatments* and *control* are arrays of
arrays of expression values.
See
https://stat.ethz.ch/pipermail/bioconductor/2008-July/023251.html
for an explanation of the differences between siggens SAM
and Excel SAM. To parameterize the FDR to excel sam, set the
flag *use_excel_sam*.
.. note::
SAM requires log2 scaled expression levels.
I ran into trouble using this library. I was not able to
reproduce the same results as the original SAM study, getting
differences in d and in the fdr.
fold change is treatment / control.
'''
def __call__(self, probesets,
treatments,
controls,
pattern=None,
fdr=0.10,
ngenes=None,
npermutations=1000,
ndelta=10,
method="ttest",
use_excel_sam=False,
treatment_label="treatment",
control_label="control"):
if ngenes and fdr:
raise ValueError("either supply ngenes or fdr, but not both.")
R.library("siggenes")
m = numpy.matrix(treatments + controls)
m = numpy.transpose(m)
E.debug("build expression matrix: %i x %i" % m.shape)
labels = numpy.array([1] * len(treatments) + [0] * len(controls))
# 1000 permutations for P-Values of down to 0.0001. Setting this
# to a high value improved reproducibility of results.
kwargs = {}
# kwargs set to replicate excel SAM
if use_excel_sam:
kwargs.update(
{"control":
r('''samControl( lambda = 0.5, n.delta = %(ndelta)s) ''' %
locals()),
"med": True,
"var.equal": True})
else:
kwargs.update({"control":
r('''samControl( n.delta = %(ndelta)s ) ''' %
locals())},)
# the option B needs to be not set if wilc.stat is chosen
if method == "ttest":
kwargs["method"] = r('''d.stat''')
kwargs["B"] = npermutations
elif method == "wilc":
kwargs["method"] = r('''wilc.stat''')
elif method == "cat":
kwargs["method"] = r('''cat.stat''')
else:
raise ValueError("unknown statistic `%s`" % method)
E.info("running sam with the following options: %s" % str(kwargs))
a = R.sam(numpy.array(m),
labels,
gene_names=numpy.array(probesets),
**kwargs)
# E.debug("%s" % str(a))
R.assign("a", a)
fdr_data = collections.namedtuple("sam_fdr", (
"delta", "p0", "false", "significant", "fdr", "cutlow",
"cutup", "j2", "j1"))
cutoff_data = collections.namedtuple(
"sam_cutoff", ("delta", "significant", "fdr"))
gene_data = collections.namedtuple(
"sam_gene", ("row", "dvalue", "stddev", "rawp", "qvalue", "rfold"))
def _totable(robj):
'''convert robj to a row-wise table.'''
s = numpy.matrix(robj)
t = [numpy.array(x).reshape(-1,) for x in s]
return t
# extract the fdr values
# returns R matrix
t = _totable(a.do_slot('mat.fdr'))
assert len(t[0]) == len(fdr_data._fields)
# for x in t: E.debug( "x=%s" % str(x))
fdr_values = [fdr_data(*x) for x in t]
# find d cutoff
if fdr is not None and fdr > 0:
s = numpy.matrix(R.findDelta(a, fdr))
try:
cutoffs = [cutoff_data(*numpy.array(x).reshape(-1,))
for x in s]
E.debug("sam cutoffs for fdr %f: %s" % (fdr, str(cutoffs)))
cutoff = cutoffs[-1]
except TypeError:
E.debug("could not get cutoff")
cutoff = None
elif ngenes:
s = numpy.matrix(R.findDelta(a, ngenes))
try:
cutoffs = [cutoff_data(*numpy.array(x).reshape(-1,))
for x in s]
E.debug("sam cutoffs for ngenes %i: %s" % (ngenes, str(cutoffs)))
cutoff = cutoffs[-1]
except TypeError:
E.debug("could not get cutoff")
cutoff = None
else:
raise ValueError("either supply ngenes or fdr")
# collect (unadjusted) p-values and qvalues for all probesets
pvalues = dict(zip(probesets, r('''a@p.value''')))
qvalues = dict(zip(probesets, r('''a@q.value''')))
if pattern:
outfile = pattern % "sam.pdf"
R.pdf(outfile)
if cutoff:
R.plot(a, cutoff.delta)
else:
R.plot(a)
r['dev.off']()
siggenes = {}
significant_genes = set()
if cutoff is not None:
E.debug("using cutoff %s" % str(cutoff))
summary = r('''summary( a, %f )''' % cutoff.delta)
# summary = R.summary( a, cutoff.delta )
R.assign("summary", summary)
significant_genes = set(
[probesets[int(x) - 1] for x
in r('''summary@row.sig.genes''')])
# E.debug( "significant genes=%s" % str(significant_genes))
r_result = zip(*_totable(summary.do_slot('mat.sig')))
if len(r_result) > 0:
assert len(r_result[0]) == 6, \
"expected six columns from siggenes module, got: %s" % \
len(r_result[0])
for x in r_result:
if x[4] > fdr:
E.warn("%s has qvalue (%f) larger than cutoff, but "
"is called significant." % (str(x), x[4]))
# except TypeError:
# only a single value
# x = [r_result[y] for y in ("Row", "d.value", "stdev", "rawp", "q.value", "R.fold") ]
# if x[4] > fdr:
# E.warn( "%s has qvalue (%f) larger than cutoff, but is called significant." % (str(x), x[4]))
siggenes[probesets[int(x[0]) - 1]] = gene_data(*x)
else:
E.debug("no cutoff found - no significant genes.")
genes = []
for probeset, treatment, control in zip(
probesets, zip(*treatments), zip(*controls)):
mean1, mean2 = numpy.mean(treatment), numpy.mean(control)
if probeset in siggenes:
s = siggenes[probeset]
pvalue = s.rawp
qvalue = s.qvalue
else:
pvalue = pvalues[probeset]
qvalue = qvalues[probeset]
significant = (0, 1)[probeset in significant_genes]
genes.append(GeneExpressionResult._make((probeset,
treatment_label,
mean1,
numpy.std(treatment),
control_label,
mean2,
numpy.std(control),
pvalue,
qvalue,
mean1 - mean2,
math.pow(
2, mean1 - mean2),
math.pow(
2, mean1 - mean2),
significant,
"OK")))
return genes, cutoff, fdr_values
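# Illustrative sketch only (never called in this module): calling the SAM
# wrapper on a small, made-up set of log2-scaled expression values. Requires
# the siggenes R package; probe names and values are hypothetical.
def _example_run_sam():
    probesets = ["probe_1", "probe_2", "probe_3"]
    treatments = [[5.1, 6.0, 7.2],   # one list of log2 values per array
                  [5.3, 6.2, 7.0]]
    controls = [[4.0, 6.1, 6.0],
                [4.2, 5.9, 6.1]]
    genes, cutoff, fdr_values = SAM()(probesets,
                                      treatments,
                                      controls,
                                      fdr=0.10)
    return genes, cutoff, fdr_values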
#########################################################################
#########################################################################
#########################################################################
def loadTagData(tags_filename, design_filename):
'''load tag data for deseq/edger analysis.
*tags_file* is a tab-separated file with counts.
*design_file* is a tab-separated file with the
experimental design with a minimum of four columns::
track include group pair
CW-CD14-R1 0 CD14 1
CW-CD14-R2 0 CD14 1
CW-CD14-R3 1 CD14 1
CW-CD4-R1 1 CD4 1
FM-CD14-R1 1 CD14 2
FM-CD4-R2 0 CD4 2
FM-CD4-R3 0 CD4 2
FM-CD4-R4 0 CD4 2
track
name of track - should correspond to column header in *tags_file*
include
flag to indicate whether or not to include this data
group
group indicator - experimental group
pair
pair that sample belongs to (for paired tests)
Additional columns in design file are taken to contain levels for
additional factors and may be included for tests that allow multi-factor
model designs.
This method creates various R objects:
countsTable : data frame with counts.
groups : vector with groups
pairs : vector with pairs
factors : df of additional factors for more complex model designs
'''
# Load counts table
E.info("loading tag data from %s" % tags_filename)
r('''counts_table = read.table('%(tags_filename)s',
header=TRUE,
row.names=1,
stringsAsFactors=TRUE,
comment.char='#')''' % locals())
E.info("read data: %i observations for %i samples" %
tuple(r('''dim(counts_table)''')))
E.debug("sample names: %s" % r('''colnames(counts_table)'''))
# Load comparisons from file
r('''pheno = read.delim(
'%(design_filename)s',
header=TRUE,
stringsAsFactors=TRUE,
comment.char='#')''' % locals())
# Make sample names R-like - replace '-' with '.'
r('''pheno[,1] = gsub('-', '.', pheno[,1]) ''')
E.debug("design names: %s" % r('''pheno[,1]'''))
# Ensure pheno rows match count columns
pheno = r(
'''pheno2 = pheno[match(colnames(counts_table),pheno[,1]),,drop=FALSE]''')
missing = r('''colnames(counts_table)[is.na(pheno2)][1]''')
if missing:
E.warn("missing samples from design file are ignored: %s" %
missing)
# subset data & set conditions
r('''includedSamples <- !(is.na(pheno2$include) | pheno2$include == '0') ''')
E.debug("included samples: %s" %
r('''colnames(counts_table)[includedSamples]'''))
r('''countsTable <- counts_table[ , includedSamples ]''')
r('''groups <- factor(pheno2$group[ includedSamples ])''')
r('''conds <- pheno2$group[ includedSamples ]''')
r('''pairs <- factor(pheno2$pair[ includedSamples ])''')
# if additional columns present, pass to 'factors'
r('''if (length(names(pheno2)) > 4) {
factors <- data.frame(pheno2[includedSamples,5:length(names(pheno2))])
} else {
factors <- NA
}''')
E.info("filtered data: %i observations for %i samples" %
tuple(r('''dim(countsTable)''')))
def filterTagData(filter_min_counts_per_row=1,
filter_min_counts_per_sample=10,
filter_percentile_rowsums=0):
'''filter tag data.
* remove rows with fewer than x counts in most highly expressed sample
* remove samples with fewer than x counts in most highly expressed row
* remove the lowest percentile of rows in the table, sorted
by total tags per row
'''
# Remove windows with no data
r('''max_counts = apply(countsTable,1,max)''')
r('''countsTable = countsTable[max_counts>=%i,]''' %
filter_min_counts_per_row)
E.info("removed %i empty rows" %
tuple(r('''sum(max_counts == 0)''')))
observations, samples = tuple(r('''dim(countsTable)'''))
E.info("trimmed data: %i observations for %i samples" %
(observations, samples))
# remove samples without data
r('''max_counts = apply(countsTable,2,max)''')
empty_samples = tuple(
r('''max_counts < %i''' % filter_min_counts_per_sample))
sample_names = r('''colnames(countsTable)''')
nempty_samples = sum(empty_samples)
if nempty_samples:
E.warn("%i empty samples are being removed: %s" %
(nempty_samples,
",".join([sample_names[x]
for x, y in enumerate(empty_samples) if y])))
r('''countsTable <- countsTable[, max_counts >= %i]''' %
filter_min_counts_per_sample)
r('''groups <- groups[max_counts >= %i]''' %
filter_min_counts_per_sample)
r('''pairs <- pairs[max_counts >= %i]''' %
filter_min_counts_per_sample)
r('''if (!is.na(factors)) {factors <- factors[max_counts >= %i,]}''' %
filter_min_counts_per_sample)
observations, samples = tuple(r('''dim(countsTable)'''))
# percentile filtering
if filter_percentile_rowsums > 0:
percentile = float(filter_percentile_rowsums) / 100.0
r('''sum_counts = rowSums( countsTable )''')
r('''take = (sum_counts > quantile(sum_counts, probs = %(percentile)f))''' %
locals())
discard, keep = r('''table( take )''')
E.info("percentile filtering at level %f: keep=%i, discard=%i" %
(filter_percentile_rowsums,
keep, discard))
r('''countsTable = countsTable[take,]''')
observations, samples = tuple(r('''dim(countsTable)'''))
return observations, samples
def groupTagData(ref_group=None, ref_regex=None):
'''compute groups and pairs from tag data table.'''
if ref_regex is not None and ref_group is None:
groups = r('''levels(groups)''')
for g in groups:
rx = re.compile(ref_regex)
if rx.search(g):
ref_group = g
# Relevel the groups so that the reference comes first
if ref_group is not None:
E.info("reference group (control) is '%s'" % ref_group)
r('''groups <- relevel(groups, ref="%s")''' % ref_group)
groups = r('''levels(groups)''')
pairs = r('''levels(pairs)''')
factors = r('''factors''')
# JJ - check whether there are additional factors in design file...
# warning... isintance(df, rpy.robjects.vectors.Vector) returns True
if isinstance(factors, rpy2.robjects.vectors.DataFrame):
E.warn("There are additional factors in design file that are ignored"
" by groupTagData: %s" % factors.r_repr())
# AH: uncommented as it causes an issue with rpy2 2.7.7
# else:
# # Hack... must be a better way to evaluate r NA instance in python?
# import pdb; pdb.set_trace()
# assert len(list(factors)) == 1 and bool(list(factors)[0]) is False, \
# "factors must either be DataFrame or NA in R global namespace"
# Test if replicates exist - at least one group must have multiple samples
max_per_group = r('''max(table(groups)) ''')[0]
has_replicates = max_per_group >= 2
# Test if pairs exist:
npairs = r('''length(table(pairs)) ''')[0]
has_pairs = npairs == 2
# at least two samples per pair
if has_pairs:
min_per_pair = r('''min(table(pairs)) ''')[0]
has_pairs = min_per_pair >= 2
return groups, pairs, has_replicates, has_pairs
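# Illustrative sketch only (never called in this module): the intended call
# order for the tag-data helpers above. File names are hypothetical; the
# helpers populate and filter R-side globals (countsTable, groups, pairs,
# factors) rather than returning data frames.
def _example_prepare_tag_data():
    loadTagData("counts.tsv.gz", "design.tsv")
    nobservations, nsamples = filterTagData(filter_min_counts_per_row=1,
                                            filter_min_counts_per_sample=10)
    groups, pairs, has_replicates, has_pairs = groupTagData(ref_group=None)
    return groups, has_replicates, has_pairs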
def plotCorrelationHeatmap(method="correlation"):
'''plot a heatmap of correlations derived from
countsTable.
'''
if method == "correlation":
r('''dists <- dist( (1 - cor(countsTable)) / 2 )''')
else:
r('''dists <- dist( t(as.matrix(countsTable)), method = '%s' )''' %
method)
r('''heatmap( as.matrix( dists ), symm=TRUE )''')
def plotPairs():
'''requires counts table'''
# Plot pairs
r('''panel.pearson <- function(x, y, digits=2, prefix="", cex.cor, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.6/strwidth(txt)
x = 0.5;
y = 0.5;
if (par("xlog")) { x = 10^x };
if (par("ylog")) { y = 10^y };
text(x, y, txt, cex = cex);
}
''')
try:
r('''pairs(countsTable,
lower.panel = panel.pearson,
pch=".",
labels=colnames(countsTable),
log="xy")''')
except RRuntimeError:
E.warn("can not plot pairwise scatter plot")
def plotPCA(groups=True):
'''plot a PCA plot from countsTable using ggplot.
If groups is *True*, the variable ``groups`` is
used for colouring. If *False*, the groups are
determined by sample labels.
'''
r('''suppressMessages(library(ggplot2))''')
r('''pca = prcomp(t(countsTable))''')
# Build factor groups by splitting labels at "."
r('''colour=groups''')
r('''shape=0''')
r('''size=1''')
if groups is False:
r('''mm = matrix(
unlist(sapply(colnames(countsTable),strsplit,'[.]')),
nrow=length(colnames(countsTable)),
byrow=T)''')
nrows, nlevels = r('''dim(mm)''')
if nlevels > 1:
r('''colour=mm[,1]''')
if nlevels > 2:
r('''shape=mm[,2]''')
try:
r('''p1 = ggplot(
as.data.frame(pca$x),
aes(x=PC1, y=PC2,
colour=colour,
shape=shape,
label=rownames(pca$x))) \
+ geom_text(size=4, vjust=1) \
+ geom_point()''')
r('''p2 = qplot(x=PC1, y=PC3,
data = as.data.frame(pca$x),
label=rownames(pca$x),
shape=shape,
colour=colour)''')
r('''p3 = qplot(x=PC2, y=PC3,
data = as.data.frame(pca$x),
label=rownames(pca$x),
shape=shape,
colour=colour)''')
# TODO: plot all in a multi-plot with proper scale
# the following squishes the plots
# r('''source('%s')''' %
# os.path.join(os.path.dirname(E.__file__),
# "../R",
# "multiplot.R"))
# r('''multiplot(p1, p2, p3, cols=2)''')
r('''plot(p1)''')
except RRuntimeError as msg:
E.warn("could not plot in plotPCA(): %s" % msg)
def runEdgeR(outfile,
outfile_prefix="edger.",
fdr=0.1,
prefix="",
dispersion=None,
ref_group=None,
ref_regex=None,
):
'''run EdgeR on countsTable.
Results are stored in *outfile* and files prefixed by *outfile_prefix*.
The dispersion is usually measured from replicates. If there are no
replicates, you need to set the *dispersion* explicitly.
See page 13 of the EdgeR user guide::
2. Simply pick a reasonable dispersion value, based on your
experience with similar data, and use that. Although
subjective, this is still more defensible than assuming Poisson
variation. Typical values are dispersion=0.4 for human data,
dispersion=0.1 for data on genetically identical model
organisms or dispersion=0.01 for technical replicates.
'''
# load library
r('''suppressMessages(library('edgeR'))''')
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
# output heatmap plot
fn = '%(outfile_prefix)sheatmap.png' % locals()
E.info("outputting heatmap to {}".format(fn))
R.png(fn)
plotCorrelationHeatmap()
r['dev.off']()
E.info('running EdgeR: groups=%s, pairs=%s, has_replicates=%s, has_pairs=%s' %
(groups, pairs, has_replicates, has_pairs))
if has_pairs:
# output difference between groups
R.png('''%(outfile_prefix)sbalance_groups.png''' % locals())
first = True
for g1, g2 in itertools.combinations(groups, 2):
r('''a = rowSums( countsTable[groups == '%s'] ) ''' % g1)
r('''b = rowSums( countsTable[groups == '%s'] ) ''' % g2)
if first:
r('''plot(cumsum(sort(a - b)), type = 'l') ''')
first = False
else:
r('''lines(cumsum(sort(a - b))) ''')
r['dev.off']()
r('''suppressMessages(library('ggplot2'))''')
r('''suppressMessages(library('reshape'))''')
# output difference between pairs within groups
first = True
legend = []
for pair in pairs:
for g1, g2 in itertools.combinations(groups, 2):
key = re.sub("-", "_", "pair_%s_%s_vs_%s" % (pair, g1, g2))
legend.append(key)
# print r('''colnames( countsTable) ''')
# print r(''' pairs=='%s' ''' % pair)
# print r(''' groups=='%s' ''' % g1)
r('''a = rowSums( countsTable[pairs == '%s' & groups == '%s'])''' % (
pair, g1))
r('''b = rowSums( countsTable[pairs == '%s' & groups == '%s'])''' % (
pair, g2))
r('''c = cumsum( sort(a - b) )''')
r('''c = c - min(c)''')
if first:
data = r('''d = data.frame( %s = c)''' % key)
first = False
else:
r('''d$%s = c''' % key)
# remove row names (gene identifiers)
r('''row.names(d) = NULL''')
# add numbers of genes (x-axis)
r('''d$genes=1:nrow(d)''')
# merge data for ggplot
r('''d = melt( d, 'genes', variable_name = 'comparison' )''')
# plot
r('''gp = ggplot(d)''')
r('''pp = gp + \
geom_line(aes(x=genes,y=value,group=comparison,color=comparison))''')
try:
R.ggsave('''%(outfile_prefix)sbalance_pairs.png''' % locals())
r['dev.off']()
except RRuntimeError:
E.warn("could not plot")
# build DGEList object
# ignore message: "Calculating library sizes from column totals"
r('''countsTable = suppressMessages(DGEList(countsTable, group=groups))''')
# Relevel groups to make the results predictable - IMS
if ref_group is not None:
r('''countsTable$samples$group <- relevel(countsTable$samples$group,
ref = "%s")''' % ref_group)
else:
# if no ref_group provided use first group in groups
r('''countsTable$samples$group <- relevel(countsTable$samples$group,
ref = "%s")''' % groups[0])
# calculate normalisation factors
E.info("calculating normalization factors")
r('''countsTable = calcNormFactors( countsTable )''')
E.info("output")
# output MDS plot
R.png('''%(outfile_prefix)smds.png''' % locals())
try:
r('''plotMDS( countsTable )''')
except RRuntimeError:
E.warn("can not plot mds")
r['dev.off']()
# build design matrix
if has_pairs:
r('''design = model.matrix(~pairs + countsTable$samples$group)''')
else:
r('''design = model.matrix(~countsTable$samples$group)''')
# r('''rownames(design) = rownames( countsTable$samples )''')
# r('''colnames(design)[length(colnames(design))] = "CD4" ''' )
# fitting model to each tag
if has_replicates:
# estimate common dispersion
r('''countsTable = estimateGLMCommonDisp(countsTable, design)''')
# estimate tagwise dispersion
r('''countsTable = estimateGLMTagwiseDisp(countsTable, design)''')
# fitting model to each tag
r('''fit = glmFit(countsTable, design)''')
else:
# fitting model to each tag
if dispersion is None:
raise ValueError("no replicates and no dispersion")
E.warn("no replicates - using a fixed dispersion value")
r('''fit = glmFit(countsTable, design, dispersion=%f)''' %
dispersion)
# perform LR test
r('''lrt = glmLRT(fit)''')
E.info("Generating output")
# output cpm table
r('''suppressMessages(library(reshape2))''')
r('''countsTable.cpm <- cpm(countsTable, normalized.lib.sizes=TRUE)''')
r('''melted <- melt(countsTable.cpm)''')
r('''names(melted) <- c("test_id", "sample", "ncpm")''')
# melt columns are factors - convert to string for sorting
r('''melted$test_id = levels(melted$test_id)[as.numeric(melted$test_id)]''')
r('''melted$sample = levels(melted$sample)[as.numeric(melted$sample)]''')
# sort cpm table by test_id and sample
r('''sorted = melted[with(melted, order(test_id, sample)),]''')
r('''gz = gzfile("%(outfile_prefix)scpm.tsv.gz", "w" )''' % locals())
r('''write.table(sorted, file=gz, sep = "\t",
row.names=FALSE, quote=FALSE)''')
r('''close(gz)''')
# compute adjusted P-Values
r('''padj = p.adjust(lrt$table$PValue, 'BH')''')
rtype = collections.namedtuple("rtype", "lfold logCPM LR pvalue")
# output differences between pairs
if len(groups) == 2:
R.png('''%(outfile_prefix)smaplot.png''' % locals())
r('''plotSmear(countsTable, pair=c('%s'))''' % "','".join(groups))
r('''abline(h=c(-2, 2), col='dodgerblue') ''')
r['dev.off']()
# I am assuming that logFC is the base 2 logarithm foldchange.
# Parse results and parse to file
results = []
counts = E.Counter()
df = r('''lrt$table''')
df["padj"] = r('''padj''')
counts.significant = sum(df.padj <= fdr)
counts.insignificant = sum(df.padj > fdr)
counts.significant_over = sum((df.padj <= fdr) & (df.logFC > 0))
counts.significant_under = sum((df.padj <= fdr) & (df.logFC < 0))
counts.input = len(df)
counts.all_over = sum(df.logFC > 0)
counts.all_under = sum(df.logFC < 0)
counts.fail = sum(df.PValue.isnull())
counts.ok = counts.input - counts.fail
df["fold"] = numpy.power(2.0, df.logFC)  # 2**logFC, fold change on the linear scale
df["significant"] = df.padj <= fdr
# TODO: use pandas throughout
for interval, d in df.iterrows():
# fold change is determined by the alphabetical order of the factors.
# Is the following correct?
results.append(GeneExpressionResult._make((
interval,
groups[1],
d.logCPM,
0,
groups[0],
d.logCPM,
0,
d.PValue,
d.padj,
d.logFC,
d.fold,
d.logFC, # no transform of lfold
str(int(d.significant)),
"OK")))
writeExpressionResults(outfile, results)
outf = iotools.open_file("%(outfile_prefix)ssummary.tsv" % locals(), "w")
outf.write("category\tcounts\n%s\n" % counts.asTable())
outf.close()
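# Illustrative sketch only (never called in this module): a typical runEdgeR()
# call once the count and design tables have been loaded. File names, the fdr
# threshold and the fixed dispersion value are hypothetical; an explicit
# dispersion is only required when the design has no replicates.
def _example_run_edger():
    loadTagData("counts.tsv.gz", "design.tsv")
    filterTagData()
    runEdgeR("edger_results.tsv",
             outfile_prefix="edger.",
             fdr=0.05,
             dispersion=0.4,
             ref_group="control")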
# needs to put into class
##
def deseqPlotSizeFactors(outfile):
'''plot size factors - requires cds object.'''
R.png(outfile)
r('''par(mar=c(8,4,4,2))''')
r('''barplot( sizeFactors( cds ), main="size factors", las=2)''')
r['dev.off']()
def deseqOutputSizeFactors(outfile):
'''output size factors - requires cds object.'''
size_factors = r('''sizeFactors( cds )''')
samples = r('''names(sizeFactors(cds))''')
with iotools.open_file(outfile, "w") as outf:
outf.write("sample\tfactor\n")
for name, x in zip(samples, size_factors):
outf.write("%s\t%s\n" % (name, str(x)))
def deseqPlotCorrelationHeatmap(outfile, vsd):
'''plot a heatmap
Use variance stabilized data in object vsd.
Should be 'blind', as then the transform is
not informed by the experimental design.
'''
# rpy2.4.2 - passing of arrays seems to be broken - do it in R
# dists = r['as.matrix'](R.dist(R.t(R.exprs(vsd))))
dists = r('''as.matrix(dist(t(exprs(vsd))))''')
R.png(outfile)
r['heatmap.2'](
dists,
trace='none',
margin=ro.IntVector((10, 10)))
r['dev.off']()
def deseqPlotGeneHeatmap(outfile,
data,
Rowv=False,
Colv=False):
'''plot a heatmap of all genes
Use variance stabilized data in object vsd.
Should be 'blind', as then the transform is
not informed by the experimental design.
'''
if len(data) == 0:
return
# do not print if not enough values in one
# direction (single row or column)
if min(R.dim(data)) < 2:
return
R.png(outfile, width=500, height=2000)
hmcol = R.colorRampPalette(r['brewer.pal'](9, "GnBu"))(100)
r['heatmap.2'](
data,
col=hmcol,
trace="none",
dendrogram="none",
Rowv=Rowv,
Colv=Colv,
labRow=False,
margin=ro.IntVector((5, 5)),
lhei=ro.IntVector((1, 10)),
key=False)
r['dev.off']()
def deseqPlotPCA(outfile, vsd, max_genes=500):
'''plot a PCA
Use variance stabilized data in object vsd.
Should be 'blind', as then the transform is
not informed by the experimental design.
'''
R.png(outfile)
# if there are more than 500 genes (after filtering)
# use the 500 most variable in the PCA
# else use the number of genes
r('''ntop = ifelse(as.integer(dim(vsd))[1] >= %(max_genes)i,
%(max_genes)i,
as.integer(dim(vsd))[1])''' % locals())
try:
r('''plotPCA(vsd)''')
except RRuntimeError as msg:
E.warn("can not plot PCA: %s" % msg)
r['dev.off']()
def deseqPlotPairs(outfile):
'''requires counts table'''
# Plot pairs
R.png(outfile, width=960, height=960)
plotPairs()
r['dev.off']()
def deseqPlotPvaluesAgainstRowsums(outfile):
'''plot pvalues against row sum rank.
This plot is useful to see if quantile filtering could
be applied.
'''
r('''counts_sum = rowSums( countsTable )''')
R.png(outfile)
r('''plot( rank( counts_sum)/length(counts_sum),
-log10( res$pval),
pch = 16,
cex= 0.1)''')
r('''abline( a=3, b=0, col='red')''')
r['dev.off']()
def deseqParseResults(control_name, treatment_name, fdr, vsd=False):
'''parse deseq output.
retrieve deseq results from object 'res' in R namespace.
The 'res' object is a dataframe with the following columns (from the
DESeq manual):
id: The ID of the observable, taken from the row names of the
counts slots.
baseMean: The base mean (i.e., mean of the counts divided by the size
factors) for the counts for both conditions
baseMeanA: The base mean (i.e., mean of the counts divided by the size
factors) for the counts for condition A
baseMeanB: The base mean for condition B
foldChange: The ratio meanB/meanA
log2FoldChange: The log2 of the fold change
pval: The p value for rejecting the null hypothesis 'meanA==meanB'
padj: The adjusted p values (adjusted with 'p.adjust( pval,
method="BH")')
vsd_log2FoldChange: The log2 fold change after variance stabilization.
This data field is not part of DESeq proper, but has been added
in this module in the runDESeq() method.
Here, 'conditionA' is 'control' and 'conditionB' is 'treatment'
such that a foldChange of 2 means that treatment is twice
upregulated compared to control.
Returns a list of results.
If vsd is True, the log fold change will be computed from the variance
stabilized data.
'''
results = []
counts = E.Counter()
res_df = pandas2ri.ri2py(r["res"])
for index, data in res_df.iterrows():
counts.input += 1
# set significant flag
if data['padj'] <= fdr:
signif = 1
counts.significant += 1
if data['log2FoldChange'] > 0:
counts.significant_over += 1
else:
counts.significant_under += 1
else:
signif = 0
counts.insignificant += 1
if data['log2FoldChange'] > 0:
counts.all_over += 1
else:
counts.all_under += 1
# set lfold change to 0 if both are not expressed
if data['baseMeanA'] == 0.0 and data['baseMeanB'] == 0.0:
data['foldChange'] = 0
data['log2FoldChange'] = 0
if data['pval'] != r('''NA'''):
status = "OK"
else:
status = "FAIL"
counts[status] += 1
counts.output += 1
# check if our assumptions about the direction of fold change
# are correct
assert (data['foldChange'] > 1) == (data['baseMeanB'] > data['baseMeanA'])
# note that fold change is computed as second group (B) divided by
# first (A)
results.append(GeneExpressionResult._make((
data['id'],
treatment_name,
data['baseMeanB'],
0,
control_name,
data['baseMeanA'],
0,
data['pval'],
data['padj'],
data['log2FoldChange'],
data['foldChange'],
data['transformed_log2FoldChange'],
str(signif),
status)))
return results, counts
def deseq2ParseResults(treatment_name, control_name, fdr, vsd=False):
'''
Standardises the output format from deseq2.
Deseq2 has the following output columns:
baseMean log2FoldChange lfcSE stat pvalue padj
described in
https://bioconductor.org/packages/release/bioc/
vignettes/DESeq2/inst/doc/DESeq2.pdf
Standardised columns are generated from this output as follows:
test_id - the gene or region tested, row names from raw output
treatment_name - the first group in this differential expression
comparison (from the design file)
treatment_mean - the mean expression value for this treatment from the
normalised count table generated by deseq2
treatment_std - the standard deviation of expression for this treatment
from the normalised count table generated by deseq2
control_name - the second group in this differential expression
comparison (from the design file)
control_mean - the mean expression value for this control group from the
normalised count table generated by deseq2
control_std - the standard deviation of expression for this control group
from the normalised count table generated by deseq2
pvalue - the pvalue generated by deseq2 (from the pvalue column)
qvalue - the adjusted pvalue generated by deseq2 (from the padj column)
l2fold - the log2fold change between normalised counts generated by
deseq2 (log2FoldChange column). If betaPrior is set to TRUE, this is the
shrunken log2 fold change. If set to FALSE, no shrinkage.
fold - control mean / treatment mean
transformed_l2fold - not applicable, set to 0 (see deseq2 manual,
"The variance stabilizing and rlog transformations are provided
for applications other than differential testing,
for example clustering of samples or other machine learning applications.
For differential testing we recommend the DESeq function applied to raw
counts")
signif = True if the qvalue is less than the FDR set in :term:`PARAMS`.
status = OK if a pvalue has been generated, else FAIL
'''
r(''' fdr=%s ''' % fdr)
# assign standard column names
r('''cols = c("test_id",
"treatment_name",
"treatment_mean",
"treatment_std",
"control_name",
"control_mean",
"control_std",
"pvalue",
"qvalue",
"l2fold",
"fold",
"transformed_l2fold",
"signif",
"status") ''')
# extract normalised counts
r('''normalcounts = counts(dds, normalized=T)''')
# build empty dataframe
r('''res2 = data.frame(matrix(nrow=nrow(res), ncol=length(cols)))''')
r('''colnames(res2) = cols''')
# fill columns with values described above
r('''res2['test_id'] = rownames(res)''')
r('''g = unique(groups[groups == "%s" | groups == "%s"])''' % (treatment_name, control_name))
r('''g1 = which(groups == g[1])''')
r('''g2 = which(groups == g[2])''')
r('''res2['treatment_name'] = g[1]''')
r('''res2['treatment_mean'] = rowMeans(normalcounts[,g1])''')
r('''res2['treatment_std'] = apply(normalcounts[,g1], 1, sd)''')
r('''res2['control_name'] = g[2]''')
r('''res2['control_mean'] = rowMeans(normalcounts[,g2])''')
r('''res2['control_std'] = apply(normalcounts[,g2], 1, sd)''')
r('''res2['pvalue'] = res$pvalue''')
r('''res2['qvalue'] = res$padj''')
r('''res2['l2fold'] = res$log2FoldChange''')
# Fold change here does not reflect the shrinkage applied to
# log2fold changes
r('''res2['fold'] = res2$control_mean / res2$treatment_mean''')
r('''res2['signif'] = as.integer(res2$qvalue <= fdr)''')
r('''res2['status'] = ifelse(is.na(res2$pvalue), "FAIL", "OK")''')
# replace l2fold change and fold for expression levels of 0 in treatment
# and control with 0
r('''z1 = which(res2$treatment_mean == 0)''')
r('''z2 = which(res2$control_mean == 0)''')
r('''zeros = intersect(z1, z2)''')
r('''res2$l2fold[zeros] = 0''')
r('''res2$fold[zeros] = 0''')
# occupy transformed l2fold with 0s
r('''res2$transformed_l2fold = 0''')
D = r('res2')
D.index = D['test_id']
D = D.drop('test_id', 1)
return D
def runDESeq(outfile,
outfile_prefix="deseq.",
fdr=0.1,
prefix="",
fit_type="parametric",
dispersion_method="pooled",
sharing_mode="maximum",
ref_group=None,
ref_regex=None,
):
'''run DESeq on countsTable.
Results are stored in *outfile* and files prefixed by *outfile_prefix*.
The current analysis follows the analysis as outlined in version
1.14.0
DESeq ignores any pair information in the design matrix.
The output is treatment and control. Fold change values are
computed as treatment divided by control.
'''
# load library
r('''suppressMessages(library('DESeq'))''')
r('''suppressMessages(library('gplots'))''')
r('''suppressMessages(library('RColorBrewer'))''')
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
# Run DESeq
# Create Count data object
E.info("running DESeq: replicates=%s" % (has_replicates))
r('''cds <-newCountDataSet( countsTable, groups) ''')
# Estimate size factors
r('''cds <- estimateSizeFactors( cds )''')
no_size_factors = r('''is.na(sum(sizeFactors(cds)))''')[0]
if no_size_factors:
E.warn("no size factors - can not estimate - no output")
return
# estimate variance
if has_replicates:
E.info("replicates - estimating variance from replicates")
else:
E.info("no replicates - estimating variance with method='blind'")
dispersion_method = "blind"
E.info("dispersion_method=%s, fit_type=%s, sharing_mode=%s" %
(dispersion_method, fit_type, sharing_mode))
r('''cds <- estimateDispersions( cds,
method='%(dispersion_method)s',
fitType='%(fit_type)s',
sharingMode='%(sharing_mode)s')''' % locals())
# bring into python namespace
cds = r('''cds''')
# plot fit - if method == "pooled":
if dispersion_method == "pooled":
R.png('%sdispersion_estimates_pooled.png' %
outfile_prefix)
R.plotDispEsts(cds)
r['dev.off']()
elif not has_replicates:
# without replicates the following error appears
# in the rpy2.py2ri conversion:
# 'dims' cannot be of length 0
pass
else:
dispersions = r('''ls(cds@fitInfo)''')
for dispersion in dispersions:
R.png('%sdispersion_estimates_%s.png' %
(outfile_prefix, dispersion))
R.plotDispEsts(cds, name=dispersion)
r['dev.off']()
# plot size factors
deseqPlotSizeFactors('%(outfile_prefix)ssize_factors.png' % locals())
# output size factors
deseqOutputSizeFactors("%(outfile_prefix)ssize_factors.tsv" % locals())
# plot scatter plots of pairs
deseqPlotPairs('%(outfile_prefix)spairs.png' % locals())
if dispersion_method not in ("blind",):
# also do a blind dispersion estimate for
# a variance stabilizing transform
r('''cds_blind <- estimateDispersions( cds,
method='blind',
fitType='%(fit_type)s',
sharingMode='%(sharing_mode)s')''' % locals())
else:
r('''cds_blind = cds''')
# perform variance stabilization for log2 fold changes
vsd = r('''vsd = varianceStabilizingTransformation(cds_blind)''')
# output normalized counts (in order)
# gzfile does not work with rpy 2.4.2 in python namespace
# using R.gzfile, so do it in R-space
r('''t = counts(cds, normalized=TRUE);
write.table(t[order(rownames(t)),],
file=gzfile('%(outfile_prefix)scounts.tsv.gz'),
row.names=TRUE,
col.names=NA,
quote=FALSE,
sep='\t') ''' % locals())
# output variance stabilized counts (in order)
r('''t = exprs(vsd);
write.table(t[order(rownames(t)),],
file=gzfile('%(outfile_prefix)svsd.tsv.gz'),
row.names=TRUE,
col.names=NA,
quote=FALSE,
sep='\t')
''' % locals())
# plot correlation heatmap of variance stabilized data
deseqPlotCorrelationHeatmap(
'%scorrelation_heatmap.png' % outfile_prefix,
vsd)
# plot PCA
deseqPlotPCA('%spca.png' % outfile_prefix,
vsd)
# plot gene heatmap for all genes - order by average expression
# subtract one to get numpy indices
select = R.order(R.rowMeans(R.counts(cds)), decreasing=True)
# the following uses R-based indexing
deseqPlotGeneHeatmap(
'%sgene_heatmap.png' % outfile_prefix,
r['as.matrix'](R.exprs(vsd).rx(select)))
# plot heatmap of top 200 expressed genes
deseqPlotGeneHeatmap(
'%sgene_heatmap_top200.png' % outfile_prefix,
r['as.matrix'](R.exprs(vsd).rx(select[:200])))
# Call diffential expression for all pairings of groups included in the
# design
all_results = []
for combination in itertools.combinations(groups, 2):
control, treatment = combination
gfix = "%s_vs_%s_" % (control, treatment)
outfile_groups_prefix = outfile_prefix + gfix
E.info(("calling differential expression for "
"control=%s vs treatment=%s") %
(control, treatment))
res = r('''res = nbinomTest(cds, '%s', '%s')''' % (control, treatment))
# plot significance
R.png('''%(outfile_groups_prefix)ssignificance.png''' % locals())
r('''plot(
res$baseMean,
res$log2FoldChange,
log="x",
pch=20, cex=.1,
col = ifelse( res$padj < %(fdr)s, "red", "black"))''' % locals())
r['dev.off']()
# plot pvalues against rowsums
deseqPlotPvaluesAgainstRowsums(
'%(outfile_groups_prefix)spvalue_rowsums.png' % locals())
E.info("Generating output (%s vs %s)" % (control, treatment))
# get variance stabilized fold changes - note the reversal of
# treatment/control
r('''vsd_l2f =
(rowMeans(exprs(vsd)[,conditions(cds) == '%s', drop=FALSE])
- rowMeans( exprs(vsd)[,conditions(cds) == '%s', drop=FALSE]))''' %
(treatment, control))
# plot vsd correlation, see Figure 14 in the DESeq manual
# if you also want to colour by expression level
R.png('''%(outfile_groups_prefix)sfold_transformation.png''' %
locals())
r('''plot(
res$log2FoldChange, vsd_l2f,
pch=20, cex=.1,
col = ifelse( res$padj < %(fdr)s, "red", "black" ) )''' % locals())
r['dev.off']()
# plot heatmap of differentially expressed genes
# plot gene heatmap for all genes - order by average expression
select = r('''select = res['padj'] < %f''' % fdr)
if r('''sum(select)''')[0] > 0:
E.info('%s vs %s: plotting %i genes in heatmap' %
(treatment, control, len(select)))
data = R.exprs(vsd).rx(select)
if not isinstance(data, rpy2.robjects.vectors.FloatVector):
order = R.order(R.rowMeans(data), decreasing=True)
deseqPlotGeneHeatmap(
'%sgene_heatmap.png' % outfile_groups_prefix,
r['as.matrix'](data[order]),
Colv=False,
Rowv=True)
else:
E.warn('can not plot differentially expressed genes')
else:
E.warn('no differentially expressed genes at fdr %f' % fdr)
# Plot pvalue histogram
R.png('''%(outfile_groups_prefix)spvalue_histogram.png''' % locals())
r('''pvalues = res$pval''')
r('''hist(pvalues, breaks=50, col='skyblue' )''')
r['dev.off']()
# Plot diagnostic plots for FDR
if has_replicates:
r('''orderInPlot = order(pvalues)''')
r('''showInPlot = (pvalues[orderInPlot] < 0.08)''')
# Jethro - previously plotting x =
# pvalues[orderInPlot][showInPlot]
# pvalues[orderInPlot][showInPlot] contains all NA values
# from pvalues which(showInPlot) doesn't... removing NA
# values
r('''true.pvalues <- pvalues[orderInPlot][showInPlot]''')
r('''true.pvalues <- true.pvalues[is.finite(true.pvalues)]''')
if r('''sum(showInPlot)''')[0] > 0:
R.png('''%(outfile_groups_prefix)sfdr.png''' % locals())
# failure when no replicates:
# rpy2.rinterface.RRuntimeError:
# Error in plot.window(...) : need finite 'xlim' values
r('''plot( seq( along=which(showInPlot)),
true.pvalues,
pch='.',
xlab=expression(rank(p[i])),
ylab=expression(p[i]))''')
r('''abline(a = 0, b = %(fdr)f / length(pvalues), col = "red")
''' % locals())
r['dev.off']()
else:
E.warn('no p-values < 0.08')
# Add log2 fold with variance stabilized l2fold value
r('''res$transformed_log2FoldChange = vsd_l2f''')
# Parse results and parse to file
results, counts = deseqParseResults(control,
treatment,
fdr=fdr)
all_results += results
E.info(counts)
outf = iotools.open_file(
"%(outfile_groups_prefix)ssummary.tsv" % locals(), "w")
outf.write("category\tcounts\n%s\n" % counts.asTable())
outf.close()
writeExpressionResults(outfile, all_results)
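# Illustrative sketch only (never called in this module): a typical runDESeq()
# call after loading and filtering the count table. File names, the fdr
# threshold and the reference group name are hypothetical; with no replicates
# the dispersion method falls back to 'blind' automatically.
def _example_run_deseq():
    loadTagData("counts.tsv.gz", "design.tsv")
    filterTagData()
    runDESeq("deseq_results.tsv",
             outfile_prefix="deseq.",
             fdr=0.05,
             fit_type="parametric",
             dispersion_method="pooled",
             ref_group="control")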
def runDESeq2(outfile,
outfile_prefix="deseq2",
fdr=0.1,
ref_group=None,
model=None,
contrasts=None,
plot=1,
):
"""
Run DESeq2 on counts table.
If no model is passed, then defaults to the group column in design file
Does not make use of group tag data because the function doesn't
accommodate multi-factor designs
To Do: Parse results into standard output format.
KB: I have done this but I'm not sure if it is compatible with complex
design tables
Fix fact that plotMA is hardcoded.
"""
# load libraries
r('''suppressMessages(library('DESeq2'))''')
# Create metadata... this will eventually be a pandas dataframe
if isinstance(r('''factors'''), rpy2.robjects.vectors.DataFrame):
E.info("DESeq2: merging additional factors in design file to "
"create metadata table")
r('''mdata <- cbind(groups, factors)''')
mdata = tuple(r('''names(mdata)'''))
else:
r('''mdata <- data.frame(group=groups)''')
mdata = "group"
E.info("DESeq2 colData headers are: %s" % mdata)
# Check for model and that model terms are in metadata table
if model:
assert contrasts, "Must specify contrasts if a model design is provided"
terms = set([x for x in re.split(r"\W", model) if x != ''])
assert terms.issubset(mdata), \
"DESeq2: design formula has terms not present in colData"
else:
if mdata != "group":
E.warn("DESeq2: no model specified although the design file contains "
"additional factors; defaulting to the '~ group' design")
terms = ["group", ]
model = "~ group"
E.info("DESeq2 design formula is: %s" % model)
# Create DESeqDataSet, using countsTable, mdata, model
r('''suppressMessages(dds <- DESeqDataSetFromMatrix(countData=countsTable,
colData=mdata,
design=%(model)s))''' % locals())
# WARNING: This is not done automatically... I don't know why?
r('''colnames(dds) <- colnames(countsTable)''')
E.info("Combined colData, design formula and counts table to create"
" DESeqDataSet instance")
# Rlog transform
r('''suppressMessages(rld <- rlog(dds))''')
if plot == 1:
# Plot PCA of rlog transformed count data for top 500
for factor in terms:
outf = outfile_prefix + factor + "_PCAplot500.tiff"
E.info("Creating PCA plot for factor: %s" % outf)
r('''x <- plotPCA(rld, intgroup="%(factor)s")''' % locals())
# r('''saveRDS(x, '%(outf)s')''' % locals())
r('''tiff("%(outf)s")''' % locals())
r('''plot(x)''')
r('''dev.off()''')
# Extract rlog transformed count data...
r('''rlogtab = as.data.frame(assay(rld))''')
r('''rlogtab$test_id = rownames(rlogtab)''')
r('''rlogtab = rlogtab[, c(ncol(rlogtab), 1:ncol(rlogtab)-1)]''')
r('''rlogtab = as.data.frame(rlogtab)''')
R.data('rlogtab')
rlog_out = r('rlogtab')
rlogoutf = outfile_prefix + "rlog.tsv"
rlog_out.to_csv(rlogoutf, sep="\t", index=False)
os.system("gzip %s" % rlogoutf)
# Run DESeq2
r('''suppressMessages(dds <- DESeq(dds))''')
E.info("Completed DESeq2 differential expression analysis")
# Extract contrasts...
if contrasts:
contrasts = (x.split(":") for x in contrasts.split(","))
else:
# created by loadTagData...
groups = r('''levels(groups)''')
contrasts = (("group",) + x for x in itertools.combinations(groups, 2))
df_final = pandas.DataFrame()
raw_final = pandas.DataFrame()
all_results = []
for combination in contrasts:
variable, control, treatment = combination
# Fetch results
gfix = "%s_%s_vs_%s" % (variable, control, treatment)
outfile_groups_prefix = outfile_prefix + gfix + "_MAplot.png"
r('''res <- results(dds, contrast=c("%(variable)s",
"%(treatment)s",
"%(control)s"))''' % locals())
E.info("Extracting contrast for levels %s (treatment) vs %s (control)"
" for factor %s" % (treatment, control, variable))
# plot MA plot
if plot == 1:
r('''png("%(outfile_groups_prefix)s")''' % locals())
r('''plotMA(res, alpha=%f)''' % fdr)
r('''dev.off()''')
E.info("Plotted MA plot for levels %s (treatment) vs %s (control)"
" for factor %s" % (treatment, control, variable))
r('''res_df <- as.data.frame(res)''')
r('''res_df$test_id = rownames(res_df)''')
r('''res_df = res_df[, c(ncol(res_df), 1:ncol(res_df)-1)]''')
R.data('res_df')
raw_out = r('res_df')
# manipulate output into standard format
df_out = deseq2ParseResults(treatment, control, fdr, vsd=False)
# label the deseq2 raw output file and append it to the raw output tab
raw_out["treatment"] = [treatment, ]*len(df_out.index)
raw_out["control"] = [control, ]*len(df_out.index)
raw_out["variable"] = [variable, ]*len(df_out.index)
raw_final = raw_final.append(raw_out, ignore_index=True)
# write the standardised output table
df_out.to_csv(iotools.open_file(outfile_prefix + gfix + ".tsv.gz", "w"),
sep="\t",
index_label="test_id")
E.info("Extracted results table for contrast '%s' (treatment) vs '%s'"
" (control) for factor '%s'" % (treatment, control, variable))
# append to final dataframe
df_out.reset_index(inplace=True)
df_out.rename(columns={"index": "test_id"}, inplace=True)
df_final = df_final.append(df_out, ignore_index=True)
results = df_final.values.tolist()
# write final dataframe into standard format
writeExpressionResults(outfile, results)
rawoutf = outfile_prefix + "raw.tsv"
raw_final.to_csv(rawoutf, sep="\t", index=False)
os.system("gzip %s" % rawoutf)
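# Illustrative sketch only (never called in this module): running DESeq2 with
# an explicit model and a single contrast of the form "factor:control:treatment".
# File, factor and level names are hypothetical; 'condition' is assumed to be
# an additional column in the design file beyond track/include/group/pair.
def _example_run_deseq2():
    loadTagData("counts.tsv.gz", "design.tsv")
    filterTagData()
    runDESeq2("deseq2_results.tsv",
              outfile_prefix="deseq2.",
              fdr=0.05,
              model="~condition",
              contrasts="condition:untreated:treated")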
Design = collections.namedtuple("Design", ("include", "group", "pair"))
def readDesignFile(design_file):
'''reads a design file.'''
design = collections.OrderedDict()
with iotools.open_file(design_file) as inf:
for line in inf:
if line.startswith("track"):
continue
track, include, group, pair = line.split("\t")[:4]
if track in design:
raise ValueError("duplicate track '%s'" % track)
design[track] = Design._make((int(include), group, pair))
return design
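# Illustrative sketch only (never called in this module): reading a design
# file and listing the tracks flagged for inclusion. The file name is
# hypothetical; the expected four columns are track, include, group and pair.
def _example_read_design():
    design = readDesignFile("design.tsv")
    included = [track for track, entry in design.items()
                if entry.include == 1]
    return included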
def plotTagStats(infile, design_file, outfile_prefix):
'''provide summary plots for tag data.'''
loadTagData(infile, design_file)
nobservations, nsamples = filterTagData()
if nobservations == 0:
E.warn("no observations - no output")
return
if nsamples == 0:
E.warn("no samples remain after filtering - no output")
return
groups, pairs, has_replicates, has_pairs = groupTagData()
# import rpy2.robjects.lib.ggplot2 as ggplot2
r('''suppressMessages(library('ggplot2'))''')
r('''suppressMessages(library('reshape'))''')
r('''d = melt( log10(countsTable + 1), variable_name = 'sample' )''')
# Note that ggsave does not work if there is
# no X display.
R.png(outfile_prefix + ".densities.png")
r('''gp = ggplot(d)''')
r('''pp = gp + geom_density(aes(x=value, group=sample,
color=sample, fill=sample), alpha=I(1/3))''')
r('''plot(pp)''')
r['dev.off']()
R.png(outfile_prefix + ".boxplots.png")
r('''gp = ggplot(d)''')
r('''pp = gp +
geom_boxplot(aes(x=sample,y=value,color=sample,fill=sample),
size=0.3,
alpha=I(1/3)) +
theme(axis.text.x = element_text( angle=90, hjust=1, size=8 ) )''')
r('''plot(pp)''')
r['dev.off']()
def plotDETagStats(infile, outfile_prefix,
additional_file=None,
join_columns=None,
additional_columns=None):
'''provide summary plots for tag data.
Stratify boxplots and densities according to differential
expression calls.
The input file is the output of any of the DE
tools, see GeneExpressionResults for column names.
Additional file will be joined with infile and any additional
columns will be output as well.
'''
table = pandas.read_csv(iotools.open_file(infile),
sep="\t")
if additional_file is not None:
additional_table = pandas.read_csv(
iotools.open_file(additional_file),
sep="\t")
table = pandas.merge(table,
additional_table,
on=join_columns,
how="left",
sort=False)
# remove index. If it is numbered starting from 1, there is a bug
# in ggplot, see https://github.com/yhat/ggplot/pull/384
table.reset_index(inplace=True)
# add log-transformed count data
table['log10_treatment_mean'] = numpy.log10(table['treatment_mean'] + 1)
table['log10_control_mean'] = numpy.log10(table['control_mean'] + 1)
    table['dmr'] = numpy.array(["insignificant"] * len(table))
table.loc[
(table["l2fold"] > 0) & (table["significant"] == 1), "dmr"] = "up"
table.loc[
(table["l2fold"] < 0) & (table["significant"] == 1), "dmr"] = "down"
def _dplot(table, outfile, column):
plot = ggplot.ggplot(
ggplot.aes(column,
colour='dmr',
fill='dmr'),
data=table) + \
ggplot.geom_density(alpha=0.5)
try:
plot.save(filename=outfile)
except Exception as msg:
E.warn("no plot for %s: %s" % (column, msg))
def _bplot(table, outfile, column):
plot = ggplot.ggplot(
ggplot.aes(x='dmr', y=column),
data=table) + \
ggplot.geom_boxplot()
try:
plot.save(filename=outfile)
except ValueError as msg:
# boxplot fails if all values are the same
# see https://github.com/yhat/ggplot/issues/393
E.warn(msg)
# TODO: ggplot not supported, replace with plotnine
# _dplot(table,
# outfile_prefix + ".densities_tags_control.png",
# "log10_control_mean")
# _dplot(table,
# outfile_prefix + ".densities_tags_treatment.png",
# "log10_treatment_mean")
# _bplot(table,
# outfile_prefix + ".boxplot_tags_control.png",
# "log10_control_mean")
# _bplot(table,
# outfile_prefix + ".boxplot_tags_treatment.png",
# "log10_treatment_mean")
if additional_columns:
for column in additional_columns:
_dplot(table,
outfile_prefix + ".densities_%s.png" % column,
column)
_bplot(table,
outfile_prefix + ".boxplot_%s.png" % column,
column)
return
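# Illustrative sketch (not part of the original module): a typical call to
# plotDETagStats on the standardised DE output table, joined with an optional
# per-feature annotation table. All file and column names here are hypothetical.
def _example_plot_de_tag_stats():
    plotDETagStats(
        "deseq2_results.tsv.gz",              # output of one of the DE runners
        "deseq2_results",                     # prefix for the generated .png files
        additional_file="gene_annotation.tsv.gz",
        join_columns="test_id",               # column shared by both tables
        additional_columns=["gene_length", "gc_content"])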
def runMockAnalysis(outfile,
outfile_prefix,
ref_group=None,
ref_regex=None,
pseudo_counts=0):
'''run a mock analysis on a count table.
compute fold enrichment values, but do not normalize or
perform any test.
'''
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
all_results = []
for combination in itertools.combinations(groups, 2):
control, treatment = combination
r('''control_counts = rowSums( countsTable[groups == '%s'] )''' %
control)
r('''treatment_counts = rowSums( countsTable[groups == '%s'] )''' %
treatment)
# add pseudocounts to enable analysis of regions
# that are absent/present
if pseudo_counts:
r('''control_counts = control_counts + %f''' % pseudo_counts)
r('''treatment_counts = treatment_counts + %f''' % pseudo_counts)
r('''fc = treatment_counts / control_counts''')
results = []
for identifier, treatment_count, control_count, foldchange in \
zip(r('''rownames( countsTable)'''),
r('''treatment_counts'''),
r('''control_counts'''),
r('''fc''')):
try:
                log2fold = math.log2(foldchange)
except ValueError:
log2fold = "Inf"
results.append(GeneExpressionResult._make((
identifier,
treatment,
treatment_count,
0,
control,
control_count,
0,
1,
1,
log2fold,
foldchange,
log2fold,
"0",
"OK")))
all_results.extend(results)
writeExpressionResults(outfile, all_results)
def outputTagSummary(filename_tags,
outfile,
output_filename_pattern,
filename_design=None):
'''output summary values for a count table.'''
E.info("loading tag data from %s" % filename_tags)
if filename_design is not None:
# load all tag data
loadTagData(filename_tags, filename_design)
# filter
nobservations, nsamples = filterTagData()
else:
# read complete table
r('''countsTable = read.delim('%(filename_tags)s',
header = TRUE,
row.names = 1,
stringsAsFactors = TRUE,
comment.char = '#')''' % locals())
nobservations, nsamples = tuple(r('''dim(countsTable)'''))
E.info("read data: %i observations for %i samples" %
(nobservations, nsamples))
# remove samples without data
r('''max_counts = apply(countsTable,2,max)''')
filter_min_counts_per_sample = 1
empty_samples = tuple(
r('''max_counts < %i''' % filter_min_counts_per_sample))
sample_names = r('''colnames(countsTable)''')
nempty_samples = sum(empty_samples)
if nempty_samples:
E.warn("%i empty samples are being removed: %s" %
(nempty_samples,
",".join([sample_names[x]
for x, y in enumerate(empty_samples) if y])))
r('''countsTable <- countsTable[, max_counts >= %i]''' %
filter_min_counts_per_sample)
nobservations, nsamples = tuple(r('''dim(countsTable)'''))
r('''groups = factor(colnames( countsTable ))''')
E.debug("sample names: %s" % r('''colnames(countsTable)'''))
nrows, ncolumns = tuple(r('''dim(countsTable)'''))
outfile.write("metric\tvalue\tpercent\n")
outfile.write("number of observations\t%i\t100\n" % nobservations)
outfile.write("number of samples\t%i\t100\n" % nsamples)
# Count windows with no data
r('''max_counts = apply(countsTable,1,max)''')
# output distribution of maximum number of counts per window
outfilename = output_filename_pattern + "max_counts.tsv.gz"
E.info("outputting maximum counts per window to %s" % outfilename)
r('''write.table(table(max_counts),
file=gzfile('%(outfilename)s'),
sep="\t",
row.names=FALSE,
quote=FALSE)''' %
locals())
# removing empty rows
E.info("removing rows with no counts in any sample")
r('''countsTable = countsTable[max_counts>0,]''')
if nrows > 0:
for x in range(0, 20):
nempty = tuple(r('''sum(max_counts <= %i)''' % x))[0]
outfile.write("max per row<=%i\t%i\t%f\n" %
(x, nempty, 100.0 * nempty / nrows))
E.info("removed %i empty rows" % tuple(r('''sum(max_counts == 0)''')))
observations, samples = tuple(r('''dim(countsTable)'''))
E.info("trimmed data: %i observations for %i samples" %
(observations, samples))
# build correlation
r('''correlations = cor(countsTable)''')
outfilename = output_filename_pattern + "correlation.tsv"
E.info("outputting sample correlations table to %s" % outfilename)
r('''write.table(correlations, file='%(outfilename)s',
sep="\t",
row.names=TRUE,
col.names=NA,
quote=FALSE)''' % locals())
# output scatter plots
outfilename = output_filename_pattern + "scatter.png"
E.info("outputting scatter plots to %s" % outfilename)
R.png(outfilename, width=960, height=960)
plotPairs()
r['dev.off']()
# output heatmap based on correlations
outfilename = output_filename_pattern + "heatmap.svg"
E.info("outputting correlation heatmap to %s" % outfilename)
R.svg(outfilename)
plotCorrelationHeatmap(method="correlation")
r['dev.off']()
# output PCA
outfilename = output_filename_pattern + "pca.svg"
E.info("outputting PCA plot to %s" % outfilename)
R.svg(outfilename)
plotPCA(groups=False)
r['dev.off']()
# output an MDS plot
r('''suppressMessages(library('limma'))''')
outfilename = output_filename_pattern + "mds.svg"
E.info("outputting mds plot to %s" % outfilename)
R.svg(outfilename)
try:
r('''plotMDS(countsTable)''')
except RRuntimeError:
E.warn("can not plot mds")
r['dev.off']()
def dumpTagData(filename_tags, filename_design, outfile):
'''output filtered tag table.'''
if outfile == sys.stdout:
outfilename = ""
else:
outfilename = outfile.name
# load all tag data
loadTagData(filename_tags, filename_design)
# filter
nobservations, nsamples = filterTagData()
# output
r('''write.table( countsTable,
file='%(outfilename)s',
sep='\t',
quote=FALSE)''' % locals())
def runTTest(outfile,
outfile_prefix,
fdr=0.1,
ref_group=None,
ref_regex=None):
'''apply a ttest on the data.
For the T-test it is best to use FPKM values as
this method does not perform any library normalization.
'''
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
results = []
for combination in itertools.combinations(groups, 2):
control, treatment = combination
        # use a separate name for the result so the rpy2 interface `r` is not shadowed
        tt_result = r('''r = apply(countsTable, 1,
        function(x) { t.test(
        x[groups == '%(treatment)s'],
        x[groups == '%(control)s']) } )
        ''' % locals())
        for test_id, ttest in zip(tt_result.names, tt_result):
# TS, swapped order below as assignment was incorrect
treatment_mean, control_mean = tuple(ttest.rx2('estimate'))
fold_change = treatment_mean / control_mean
pvalue = tuple(ttest.rx2('p.value'))[0]
significant = (0, 1)[pvalue < fdr]
results.append(GeneExpressionResult._make((test_id,
treatment,
treatment_mean,
0,
control,
control_mean,
0,
pvalue,
pvalue,
numpy.log2(fold_change),
fold_change,
numpy.log2(fold_change),
significant,
"OK")))
writeExpressionResults(outfile, results)
#####################################################################
# Pandas-based functions and matplotlib-based plotting functions ####
#####################################################################
def loadTagDataPandas(tags_filename, design_filename):
'''load tag data for deseq/edger analysis.
*Infile* is a tab-separated file with counts.
*design_file* is a tab-separated file with the
experimental design with four columns::
track include group pair
CW-CD14-R1 0 CD14 1
CW-CD14-R2 0 CD14 1
CW-CD14-R3 1 CD14 1
CW-CD4-R1 1 CD4 1
FM-CD14-R1 1 CD14 2
FM-CD4-R2 0 CD4 2
FM-CD4-R3 0 CD4 2
FM-CD4-R4 0 CD4 2
track
name of track - should correspond to column header in *infile*
include
flag to indicate whether or not to include this data
group
group indicator - experimental group
pair
pair that sample belongs to (for paired tests)
This method creates various R objects:
countsTable : data frame with counts.
groups : vector with groups
pairs : vector with pairs
'''
E.info("loading tag data from %s" % tags_filename)
inf = iotools.open_file(tags_filename)
counts_table = pandas.read_csv(inf,
sep="\t",
index_col=0,
comment="#")
inf.close()
E.info("read data: %i observations for %i samples" %
counts_table.shape)
E.debug("sample names: %s" % list(counts_table.columns))
inf = iotools.open_file(design_filename)
design_table = pandas.read_csv(inf, sep="\t", index_col=0)
inf.close()
E.debug("design names: %s" % list(design_table.index))
missing = set(counts_table.columns).difference(design_table.index)
if missing:
E.warn("missing samples from design file are ignored: %s" % missing)
# remove unnecessary samples
design_table = design_table[design_table["include"] != 0]
E.debug("included samples: %s" % list(design_table.index))
counts_table = counts_table[list(design_table.index)]
E.info("filtered data: %i observations for %i samples" %
counts_table.shape)
return counts_table, design_table
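# Illustrative sketch (not part of the original module): loading a counts table
# together with its design file through the pandas-based loader. The file names
# are hypothetical placeholders.
def _example_load_tag_data_pandas():
    counts_table, design_table = loadTagDataPandas(
        "counts.tsv.gz",   # tab-separated counts, first column holds feature ids
        "design.tsv")      # four-column design file as described in the docstring
    print("%i observations, %i samples" % counts_table.shape)
    return counts_table, design_table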
def filterTagDataPandas(counts_table,
design_table,
filter_min_counts_per_row=1,
filter_min_counts_per_sample=10,
filter_percentile_rowsums=0):
'''filter tag data.
    * remove rows where no sample reaches *filter_min_counts_per_row* counts
    * remove samples whose maximum count is below *filter_min_counts_per_sample*
* remove the lowest percentile of rows in the table, sorted
by total tags per row
'''
# Remove windows with no data
max_counts_per_row = counts_table.max(1)
counts_table = counts_table[
max_counts_per_row >= filter_min_counts_per_row]
observations, samples = counts_table.shape
E.info("trimmed data: %i observations for %i samples" %
(observations, samples))
# remove samples without data
max_counts_per_sample = counts_table.max()
empty_samples = max_counts_per_sample < filter_min_counts_per_sample
sample_names = counts_table.columns
nempty_samples = sum(empty_samples)
if nempty_samples:
E.warn("%i empty samples are being removed: %s" %
(nempty_samples,
",".join([sample_names[x] for x, y in
enumerate(empty_samples) if y])))
raise NotImplementedError("removing empty samples needs to be done")
# r('''countsTable <- countsTable[, max_counts >= %i]''' % filter_min_counts_per_sample)
# r('''groups <- groups[max_counts >= %i]''' % filter_min_counts_per_sample)
# r('''pairs <- pairs[max_counts >= %i]''' % filter_min_counts_per_sample)
# observations, samples = tuple( r('''dim(countsTable)'''))
# percentile filtering
if filter_percentile_rowsums > 0:
percentile = float(filter_percentile_rowsums) / 100.0
sum_counts = counts_table.sum(1)
take = sum_counts > sum_counts.quantile(percentile)
E.info("percentile filtering at level %f: keep=%i, discard=%i" %
(filter_percentile_rowsums,
sum(take),
len(take) - sum(take)))
counts_table = counts_table[take]
return counts_table
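# Illustrative sketch (not part of the original module): filtering a small
# synthetic counts table, dropping all-zero rows and the bottom 10% of rows by
# total counts. The values are made up for demonstration only.
def _example_filter_tag_data_pandas():
    counts = pandas.DataFrame(
        {"sample1": [0, 5, 100, 2],
         "sample2": [0, 3, 80, 1]},
        index=["gene1", "gene2", "gene3", "gene4"])
    design = pandas.DataFrame(
        {"include": [1, 1], "group": ["a", "b"], "pair": [1, 1]},
        index=["sample1", "sample2"])
    filtered = filterTagDataPandas(counts, design,
                                   filter_min_counts_per_row=1,
                                   filter_min_counts_per_sample=10,
                                   filter_percentile_rowsums=10)
    # gene1 is dropped for having no counts, gene4 for falling below the percentile
    return filtered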
def identifyVariablesPandas(design_table):
# design table should have been processed by loadTagDataPandas already
# just in case, re-filter for not included samples here
design_table = design_table[design_table["include"] != 0]
conds = design_table['group'].tolist()
pairs = design_table['pair'].tolist()
# TS, adapted from JJ code for DESeq2 design tables:
# if additional columns present, pass to 'factors'
if len(design_table.columns) > 3:
factors = design_table.iloc[:, 3:]
else:
factors = None
return conds, pairs, factors
def checkTagGroupsPandas(design_table, ref_group=None):
'''compute groups and pairs from tag data table.'''
conds, pairs, factors = identifyVariablesPandas(design_table)
groups = list(set(conds))
# Relevel the groups so that the reference comes first
# how to do this in python?
# if ref_group is not None:
# r('''groups <- relevel(groups, ref = "%s")''' % ref_group)
# check this works, will need to make factors from normal df
# TS adapted from JJ code for DESeq2 -
# check whether there are additional factors in design file...
    if factors is not None:
        E.warn("There are additional factors in design file that are ignored"
               " by groupTagData: %s" % factors)
else:
pass
# Test if replicates exist - at least one group must have multiple samples
max_per_group = max([conds.count(x) for x in groups])
has_replicates = max_per_group >= 2
# Test if pairs exist:
npairs = len(set(pairs))
has_pairs = npairs == 2
# ..if so, at least two samples are required per pair
if has_pairs:
min_per_pair = min([pairs.count(x) for x in set(pairs)])
has_pairs = min_per_pair >= 2
return groups, pairs, conds, factors, has_replicates, has_pairs
ResultColumns = ["test_id", "treatment_name", "treatment_mean",
"treatment_std", "control_name", "control_mean",
"control_std", "p_value", "p_value_adj", "l2fold", "fold",
"transformed_l2fold", "significant", "status"]
ResultColumns_dtype = {"test_id": object, "treatment_name": object,
"treatment_mean": float, "treatment_std":
float, "control_name": object, "control_mean":
float, "control_std": float, "p_value": float,
"p_value_adj": float, "l2fold": float, "fold":
float, "transformed_l2fold": float,
"significant": int, "status": object}
def makeEmptyDataFrameDict():
return {key: [] for key in ResultColumns}
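# Illustrative sketch (not part of the original module): assembling a single
# result row in the standardised column layout and casting it to the declared
# dtypes. The values are arbitrary placeholders.
def _example_results_frame():
    df_dict = makeEmptyDataFrameDict()
    row = ["gene1", "treated", 10.0, 1.0, "control", 5.0, 0.5,
           0.01, 0.02, 1.0, 2.0, 1.0, 1, "OK"]
    for column, value in zip(ResultColumns, row):
        df_dict[column].append(value)
    results = pandas.DataFrame(df_dict).astype(ResultColumns_dtype)
    return results[ResultColumns]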
def runTTestPandas(counts_table,
design_table,
outfile,
outfile_prefix,
fdr,
ref_group=None):
'''apply a ttest on the data.
For the T-test it is best to use FPKM values as
this method does not perform any library normalization.
Alternatively, perform normalisation on counts table using Counts.py
'''
stats = importr('stats')
(groups, pairs, conds, factors, has_replicates,
has_pairs) = checkTagGroupsPandas(design_table, ref_group)
df_dict = makeEmptyDataFrameDict()
for combination in itertools.combinations(groups, 2):
# as each combination may have different numbers of samples in control
# and treatment, calculations have to be performed on a per
# combination basis
control, treatment = combination
n_rows = counts_table.shape[0]
df_dict["control_name"].extend((control,)*n_rows)
df_dict["treatment_name"].extend((treatment,)*n_rows)
df_dict["test_id"].extend(counts_table.index.tolist())
# subset counts table for each combination
c_keep = [x == control for x in conds]
control_counts = counts_table.iloc[:, c_keep]
t_keep = [x == treatment for x in conds]
treatment_counts = counts_table.iloc[:, t_keep]
c_mean = control_counts.mean(axis=1)
df_dict["control_mean"].extend(c_mean)
df_dict["control_std"].extend(control_counts.std(axis=1))
t_mean = treatment_counts.mean(axis=1)
df_dict["treatment_mean"].extend(t_mean)
df_dict["treatment_std"].extend(treatment_counts.std(axis=1))
t, prob = ttest_ind(control_counts, treatment_counts, axis=1)
df_dict["p_value"].extend(prob)
# what about zero values?!
df_dict["fold"].extend(t_mean / c_mean)
df_dict["p_value_adj"].extend(
list(stats.p_adjust(FloatVector(df_dict["p_value"]), method='BH')))
df_dict["significant"].extend(
[int(x < fdr) for x in df_dict["p_value_adj"]])
df_dict["l2fold"].extend(list(numpy.log2(df_dict["fold"])))
# note: the transformed log2 fold change is not transformed!
df_dict["transformed_l2fold"].extend(list(numpy.log2(df_dict["fold"])))
# set all status values to "OK"
df_dict["status"].extend(("OK",)*n_rows)
results = pandas.DataFrame(df_dict)
results.set_index("test_id", inplace=True)
results.to_csv(outfile, sep="\t", header=True, index=True)
def plotCorrelationHeatmapMatplot(counts, outfile, method="correlation",
cor_method="pearson"):
'''plot a heatmap of correlations derived from
countsTable.
'''
# to do: add other methods?
# define outside function? - Will we reuse?
heatmap_cdict_b_to_y = {
'red': ((0.0, 0.4, .4), (0.01, .4, .4), (1., .95, .95)),
'green': ((0.0, 0.4, 0.4), (0.01, .4, .4), (1., .95, .95)),
'blue': ((0.0, .9, .9), (0.01, .9, .9), (1., 0.4, 0.4))}
cm = matplotlib.colors.LinearSegmentedColormap(
'', heatmap_cdict_b_to_y, 256)
df = counts.corr(method=cor_method)
plt.pcolor(np.array(df), cmap=cm)
plt.colorbar()
plt.title("%(cor_method)s correlation heatmap" % locals())
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns, rotation=90)
plt.tight_layout()
plt.savefig(outfile)
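# Illustrative sketch (not part of the original module): drawing a Spearman
# correlation heatmap for a small random counts table. The output file name is
# a hypothetical placeholder.
def _example_correlation_heatmap():
    rng = np.random.RandomState(0)
    counts = pandas.DataFrame(rng.poisson(10, size=(50, 4)),
                              columns=["s1", "s2", "s3", "s4"])
    plotCorrelationHeatmapMatplot(counts,
                                  "correlation_heatmap.png",
                                  cor_method="spearman")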
def runEdgeRPandas(counts,
design_table,
outfile,
outfile_prefix="edger.",
fdr=0.1,
prefix="",
dispersion=None,
ref_group=None):
'''run EdgeR on countsTable.
Results are stored in *outfile* and files prefixed by *outfile_prefix*.
    The dispersion is usually measured from replicates. If there are no
    replicates, you need to set the *dispersion* explicitly.
See page 13 of the EdgeR user guide::
2. Simply pick a reasonable dispersion value, based on your
experience with similar data, and use that. Although
subjective, this is still more defensible than assuming Poisson
variation. Typical values are dispersion=0.4 for human data,
dispersion=0.1 for data on genetically identical model
organisms or dispersion=0.01 for technical replicates.
'''
# load library
r('''suppressMessages(library('edgeR'))''')
(groups, pairs, conds, factors, has_replicates,
has_pairs) = checkTagGroupsPandas(design_table, ref_group)
if not has_replicates and dispersion is None:
raise ValueError("no replicates and no dispersion")
# output heatmap plot
plotCorrelationHeatmapMatplot(counts,
'%(outfile_prefix)sheatmap.png' % locals(),
cor_method="spearman")
    E.info('running EdgeR: groups=%s, pairs=%s, has_replicates=%s, has_pairs=%s' %
(groups, pairs, has_replicates, has_pairs))
r_counts = pandas2ri.py2ri(counts)
passDFtoRGlobalEnvironment = r('''function(df){
countsTable <<- df}''')
passDFtoRGlobalEnvironment(r_counts)
if has_pairs:
# output difference between groups
# TS #####
# this is performed on non-normalised data
# should we use Counts.py to normalise first?
# also, this isn't edgeR specific, should this be
        # moved to a separate summary function?
# also move the MDS plotting?
# #####
first = True
pairs_df = pandas.DataFrame()
nrows = len(counts.index)
n = 0
for g1, g2 in itertools.combinations(groups, 2):
keep_a = [x == g1 for x in conds]
counts_a = counts.iloc[:, keep_a]
keep_b = [x == g2 for x in conds]
counts_b = counts.iloc[:, keep_b]
index = range(n, n+nrows)
n += nrows
a = counts_a.sum(axis=1)
b = counts_b.sum(axis=1)
diff = a-b
            # Series.sort() is gone from pandas; sort_values returns a sorted copy
            diff = diff.sort_values()
temp_df = pandas.DataFrame({"cumsum": np.cumsum(diff).tolist(),
"comb": "_vs_".join([g1, g2]),
"id": range(0, nrows)},
index=index)
pairs_df = pairs_df.append(temp_df)
plot_pairs = r('''function(df, outfile){
suppressMessages(library('ggplot2'))
p = ggplot(df, aes(y=cumsum, x=id)) +
geom_line(aes(col=factor(comb))) +
scale_color_discrete(name="Comparison") +
xlab("index") + ylab("Cumulative sum")
ggsave("%(outfile_prefix)sbalance_groups.png", plot = p)}
''' % locals())
r_pairs_df = pandas2ri.py2ri(pairs_df)
plot_pairs(r_pairs_df)
# output difference between pairs within groups
first = True
legend = []
n = 0
        pairs_in_groups_df = pandas.DataFrame()
"""
Misc Functionality
Only a function to make a DataFrame out of the results at the moment
"""
import itertools
import pandas as pd
from .. import colnames as cn
def make_seg_info_dframe(centers, sizes, bboxes, index_name=cn.seg_id):
"""
Collects the three dictionaries describing the segments of a volume
into a DataFrame
"""
assert len(centers) == len(sizes) == len(bboxes), "mismatched inputs"
if len(centers) == 0:
return empty_cleft_df(index_name)
# Segment Size
    sizes_df = pd.Series(sizes, name=cn.size)
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_almost_equal,
raises,
ok_,
eq_)
from rsmtool.preprocessor import (FeaturePreprocessor,
FeatureSubsetProcessor,
FeatureSpecsProcessor)
class TestFeaturePreprocessor:
def setUp(self):
self.fpp = FeaturePreprocessor()
def test_select_candidates_with_N_or_more_items(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'candidate': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_select_candidates_with_N_or_more_items_all_included(self):
data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2,
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, data)
assert_equal(len(df_excluded), 0)
def test_select_candidates_with_N_or_more_items_all_excluded(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 4)
assert_frame_equal(df_excluded, data)
assert_equal(len(df_included), 0)
def test_select_candidates_with_N_or_more_items_custom_name(self):
data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'ID': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID')
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_rename_no_columns(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'candidate', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2',
'length', 'raw', 'candidate')
assert_array_equal(df.columns,
['spkitemid', 'sc1', 'sc2', 'length', 'raw',
'candidate', 'feature1', 'feature2'])
def test_rename_no_columns_some_values_none(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
def test_rename_no_used_columns_but_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2',
'##length##', 'feature1', 'feature2'])
def test_rename_used_columns(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'feature1', 'feature2'])
def test_rename_used_columns_and_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'##raw##', 'feature1', 'feature2'])
def test_rename_used_columns_with_swapped_names(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##',
'length', 'feature1', 'feature2'])
def test_rename_used_columns_but_not_features(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2'])
df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2'])
def test_rename_candidate_column(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'apptNo', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [],
'spkitemid', 'sc1', 'sc2', None, None, 'apptNo')
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##',
'candidate', 'feature1', 'feature2'])
def test_rename_candidate_named_sc2(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score'])
df_renamed = self.fpp.rename_default_columns(df, [],
'id', 'sc1', None, None, 'score', 'sc2')
assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1',
'candidate', 'question', 'l1', 'raw'])
@raises(KeyError)
def test_check_subgroups_missing_columns(self):
df = pd.DataFrame(columns=['a', 'b', 'c'])
subgroups = ['a', 'd']
FeaturePreprocessor.check_subgroups(df, subgroups)
def test_check_subgroups_nothing_to_replace(self):
df = pd.DataFrame({'a': ['1', '2'],
'b': ['32', '34'],
'd': ['abc', 'def']})
subgroups = ['a', 'd']
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df)
def test_check_subgroups_replace_empty(self):
df = pd.DataFrame({'a': ['1', ''],
'b': [' ', '34'],
'd': ['ab c', ' ']})
subgroups = ['a', 'd']
df_expected = pd.DataFrame({'a': ['1', 'No info'],
'b': [' ', '34'],
'd': ['ab c', 'No info']})
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df_expected)
def test_filter_on_column(self):
bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'),
'sc1': ['00', 'TD', '02', '03'] * 2})
df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8],
'sc1': [0.0, 2.0, 3.0] * 2})
df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2})
(output_df_with_zeros,
output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=False)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros)
assert_frame_equal(output_df, df_filtered)
def test_filter_on_column_all_non_numeric(self):
bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2,
'spkitemlab': range(1, 9)})
expected_df_excluded = bad_df.copy()
expected_df_excluded.drop('sc1', axis=1, inplace=True)
df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
ok_(df_filtered.empty)
ok_("sc1" not in df_filtered.columns)
assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False)
def test_filter_on_column_std_epsilon_zero(self):
        # Test that the function excludes columns where the std is returned as
        # a very low value rather than 0
data = {'id': np.arange(1, 21, dtype='int64'),
'feature_ok': np.arange(1, 21),
'feature_zero_sd': [1.5601] * 20}
bad_df = pd.DataFrame(data=data)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df,
'feature_zero_sd',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
good_df = bad_df[['id', 'feature_ok']].copy()
assert_frame_equal(output_df, good_df)
ok_(output_excluded_df.empty)
def test_filter_on_column_with_inf(self):
        # Test that the function excludes columns where the feature value is 'inf'
data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32],
'feature_ok': np.arange(1, 5)})
data['feature_with_inf'] = 1 / data['feature_1']
data['id'] = np.arange(1, 5, dtype='int64')
bad_df = data[np.isinf(data['feature_with_inf'])].copy()
good_df = data[~np.isinf(data['feature_with_inf'])].copy()
bad_df.reset_index(drop=True, inplace=True)
good_df.reset_index(drop=True, inplace=True)
output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
assert_frame_equal(output_df, good_df)
assert_frame_equal(output_excluded_df, bad_df)
def test_filter_on_flag_column_empty_flag_dictionary(self):
# no flags specified, keep the data frame as is
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 0, 0, 0],
'flag2': [1, 2, 2, 1]})
flag_dict = {}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.5, 1.1, 2.2, 3.6]})
flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['a', 'b', 'c', 'd']})
flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['4', '1', '2', '3.5']})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [4.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': ['1', '2', '3.5', '4', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.0']})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.0', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.5]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.5', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.5', 2, 3.5]})
flag_dict = {'flag1': [0.0, 1.5, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.5']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 2, 3.5, 'TD']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_mixed_type_column_mixed_type_dict_filter_preserve_type(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS']})
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS']})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_int_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': [1, 2, 3, 4, 5, 6],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 2, 2, 3, 4, None]}, dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': [2, 3, 5],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': [1, 4, 6],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_float_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.2, 2.1, 2.1, 3.3, 4.2, None]})
flag_dict = {'flag1': [2.1, 4.2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2.1, 2.1, 4.2]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.2, 3.3, None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_str_flag_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': ['a', 'b', 'b', 'c', 'd', None]})
flag_dict = {'flag1': ['b', 'd']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': ['b', 'b', 'd']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': ['a', 'c', None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2.0, 'TD', 2.0, None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2.0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2.0, 2.0]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.5, 2, 2, 'TD', 4, None]},
dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.5, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_same_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [1, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD'], 'flag2': [0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [1, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
        assert_frame_equal(df_excluded, df_excluded_expected)
# Import the required packages
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import streamlit as st
import pickle
from pickle import load
from PIL import Image
import seaborn as sns
import statsmodels.api as sm
import lime.lime_tabular
from sklearn.model_selection import train_test_split
import string
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
# Set Recursion Limit
import sys
sys.setrecursionlimit(40000)
import re
import nltk
import regex as re
from nltk.corpus import stopwords
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
import lightgbm as lgb
from lightgbm import LGBMClassifier
import streamlit.components.v1 as components
import tweepy
from collections import Counter
from wordcloud import WordCloud
import datetime
import plotly.express as px
import time
import pydeck as pdk
import SessionState  # Assuming SessionState.py lives in this folder
st.sidebar.title('Dashboard Control')
control = st.sidebar.radio('Navigation Bar', ('Home', 'Live Tweet Feed', 'Time Series Analysis', 'XAI'))
if control == 'Home':
### Sentiment Code goes here
st.markdown('<h1 style="color:#8D3DAF;text-align:center;font-family: Garamond, serif;"><b>RAKSHAK</b></h1>',unsafe_allow_html=True)
st.markdown('<h2 style="color:#E07C24;text-align:center;font-family: Georgia, serif;"><b>Time Series Sentiment Analysis Of Natural Hazard Relief Operations Through Social Media Data</b></h2>',unsafe_allow_html=True)
#st.markdown("The dashboard will help the government and humanitarian aid agencies to plan and coordinate the natural disaster relief efforts, resulting in more people being saved and more effective distribution of emergency supplies during a natural hazard")
st.header("Natural Hazard Data Collected Sample")
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
# Dataset Description
h = st.sidebar.slider('Select the number of tweets using the slider', 1, 100, 10)
data_tweets = tweets.sample(h)
data_tweets['index'] = list(range(0, h, 1))
data_tweets.set_index('index', inplace=True)
st.table(data_tweets)
# Checking for class balancing and get unique labels:
st.header("Count Of Tweets In Each Class")
chart_visual_class_balancing = st.sidebar.checkbox('Class Labels', True)
if chart_visual_class_balancing==True:
fig = plt.figure(figsize=(8, 4))
#sns.countplot(y=tweets.loc[:, 'type'],data=tweets).set_title("Count of tweets in each class")
fig = px.histogram(tweets, x="type",color="type",title="Count of tweets in each class")
st.plotly_chart(fig)
# Wordclouds
# Selection of Input & Output Variables
X = tweets.loc[:, 'text']
Y = tweets.loc[:, 'type']
X = list(X)
def preprocess_dataset(d):
# Define count variables
cnt=0
punctuation_count = 0
digit_count = 0
# Convert the corpus to lowercase
lower_corpus = []
for i in range(len(d)):
lower_corpus.append(" ".join([word.lower() for word in d[i].split()]))
# Remove any special symbol or punctuation
without_punctuation_corpus = []
for i in range(len(lower_corpus)):
p = []
for ch in lower_corpus[i]:
if ch not in string.punctuation:
p.append(ch)
else:
p.append(" ")
# Count of punctuation marks removed
punctuation_count += 1
x = ''.join(p)
if len(x) > 0:
without_punctuation_corpus.append(x)
# Remove urls with http, https or www and Retweets RT
without_url_corpus = []
for i in range(len(without_punctuation_corpus)):
text = without_punctuation_corpus[i]
text = re.sub(r"http\S*||www\S*", "", text)
text = re.sub(r"RT ", "", text)
without_url_corpus.append(text)
# Remove special characters and numbers from the corpus
without_digit_corpus = []
for i in range(len(without_url_corpus)):
p = []
for word in without_url_corpus[i].split():
if word.isalpha():
p.append(word)
else:
# Count of punctuation marks removed
digit_count += 1
x = ' '.join(p)
without_digit_corpus.append(x)
# Tokenize the corpus
# word_tokenize(s): Tokenize a string to split off punctuation other than periods
# With the help of nltk.tokenize.word_tokenize() method, we are able to extract the tokens
# from string of characters by using tokenize.word_tokenize() method.
# Tokenization was done to support efficient removal of stopwords
total_count = 0
tokenized_corpus = []
for i in without_digit_corpus:
tokenized_tweet = nltk.word_tokenize(i)
tokenized_corpus.append(tokenized_tweet)
# Count the length of tokenized corpus
total_count += len(list(tokenized_tweet))
# Remove Stopwords
stopw = stopwords.words('english')
count = 0
tokenized_corpus_no_stopwords = []
for i,c in enumerate(tokenized_corpus):
tokenized_corpus_no_stopwords.append([])
for word in c:
if word not in stopw:
tokenized_corpus_no_stopwords[i].append(word)
else:
count += 1
# lemmatization and removing words that are too large and small
lemmatized_corpus = []
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
ct = 0
cnt_final=0
dictt = {}
for i in range(0,len(tokenized_corpus_no_stopwords)):
lemmatized_corpus.append([])
for w in tokenized_corpus_no_stopwords[i]:
                # lemmatize only words longer than 2 and at most 10 characters
if(len(w)>2 and len(w)<=10):
lemmatized_corpus[i].append(lemmatizer.lemmatize(w))
cnt_final+=1
# Count of final corpus
# This is the length of total corpus that went through the process of lematization
ct+=1
############## Removing words of large and small length
# Doing a survey to find out the length of words so we can remove the too small and too large words from the Corpus
# plt.bar(*zip(*dictt.items()))
# plt.show()
# Punctuation Preprocessing
preprocessed_corpus = []
for i,c in enumerate(lemmatized_corpus):
preprocessed_corpus.append([])
for word in c:
x = ''.join([ch for ch in word if ch not in string.punctuation])
if len(x) > 0:
preprocessed_corpus[i].append(x)
# Clear unwanted data variables to save RAM due to memory limitations
del lower_corpus
del without_punctuation_corpus
del without_digit_corpus
del tokenized_corpus
del tokenized_corpus_no_stopwords
del lemmatized_corpus
return preprocessed_corpus
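    # Illustrative sketch (not part of the dashboard flow): what the cleaning
    # pipeline above does to a single raw tweet. The example text is made up
    # and the expected output is indicative only.
    def _example_preprocess():
        sample = ["Need 10 oxygen cylinders at Kathmandu!! https://t.co/xyz RT"]
        # returns a list of cleaned token lists,
        # e.g. [['need', 'oxygen', 'cylinder', 'kathmandu', ...]]
        return preprocess_dataset(sample)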
# Preprocess the Input Variables
preprocessed_corpus = preprocess_dataset(X)
data_corpus = []
for i in preprocessed_corpus:
data_corpus.append(" ".join([w for w in i]))
# Creating a word cloud
st.header("Wordclouds For Dataset")
fig, axes = plt.subplots(1, 2)
# Worcloud for processed dataset
words1 = ' '.join([tweet for tweet in X])
words2 = ' '.join([tweet for tweet in data_corpus])
wordCloud1 = WordCloud(background_color ='black').generate(words1)
wordCloud2 = WordCloud(background_color ='black').generate(words2)
# Display the generated image:
axes[0].title.set_text("Raw Dataset")
axes[0].imshow(wordCloud1)
axes[0].axis("off")
axes[1].title.set_text("Processed Dataset")
axes[1].imshow(wordCloud2)
axes[1].axis("off")
st.pyplot(fig)
# Create most used hashtags
st.header("Top Hashtag Used in the Datasets")
fig, axes = plt.subplots(1, 3)
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
X1 = list(tweets1.loc[:, 'text'])
X2 = list(tweets2.loc[:, 'text'])
X3 = list(tweets3.loc[:, 'text'])
dc1 = []
pd1 = preprocess_dataset(X1)
for i in pd1:
dc1 += i
c1 = Counter(dc1)
mfw1 = c1.most_common(10)
df1 = pd.DataFrame(mfw1)
df1.columns = ['Word', 'Count']
axes[0] = px.line(df1, x='Word', y='Count',title='Nepal Earthquake 2015',labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[0])
dc2 = []
pd2 = preprocess_dataset(X2)
for i in pd2:
dc2 += i
c2 = Counter(dc2)
mfw2 = c2.most_common(10)
df2 = pd.DataFrame(mfw2)
df2.columns = ['Word', 'Count']
axes[1] = px.line(df2, x='Word', y='Count',title='Italy Earthquake 2016', labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[1])
dc3 = []
pd3 = preprocess_dataset(X3)
for i in pd3:
dc3 += i
c3 = Counter(dc3)
mfw3 = c3.most_common(10)
df3 = pd.DataFrame(mfw3)
df3.columns = ['Word', 'Count']
axes[2] = px.line(df3, x='Word', y='Count',title='COVID-19',labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[2])
#df3.set_index('Word', inplace=True)
#axes[2].plot(df3['Count'], marker='o', linewidth=0.5,ls='solid', c='blue')
#axes[2].tick_params(axis ='x', rotation =-90)
#axes[2].set_xlabel('Hashtag')
#axes[2].set_ylabel('Number of Hashtag tweeted')
#axes[2].title.set_text("COVID-19")
st.header("Sentiments of Tweets Collected")
st.caption("Select Start & End Date to display Sentiments of tweets collected")
s_date = st.date_input("Start Date", min_value=datetime.datetime(2021, 4, 1), max_value=datetime.datetime(2021, 4, 30), value=datetime.datetime(2021, 4, 1))
e_date = st.date_input("End Date", min_value=datetime.datetime(2021, 4, 1), max_value=datetime.datetime(2021, 4, 30), value=datetime.datetime(2021, 4, 30))
data = pd.read_csv('sentiment_april.csv')[['Need','Availability']]
data_T = data.T
date1 = int(str(s_date)[8:])-1
date2 = int(str(e_date)[8:])
data_T["sum"] = data_T[list(range(date1,date2,1))].sum(axis=1)
l_name = ['Need', 'Availability']
l_value = data_T['sum']
pie_dict = {'name': l_name, 'value': l_value}
pie_df = pd.DataFrame(pie_dict)
fig_pie = px.pie(pie_df, values='value', names='name', title='Sentiments of tweet collected between '+str(s_date)+' and '+str(e_date))
st.plotly_chart(fig_pie)
# Show locations for tweets
st.header("Map for Location of Each User")
df = pd.read_csv('lat-long.csv')
df.columns = ['lat', 'lon', 'country']
st.map(df)
elif control == 'Live Tweet Feed':
### Libe Tweet feed goes here
st.markdown('<h1 style="color:#E07C24;;text-align:center;"><b>Live Tweet Feed</b></h1>',unsafe_allow_html=True)
st.header("Live Tweet Feed Sample")
    hashtag = str(st.text_input("Enter the keyword or hashtag for the live Twitter feed", "#coronavirus"))
fetch_tweets = st.button("Fetch Tweets")
####input your credentials here
consumer_key = "IE5dmFVlYdg5aNrsNnZiXZVPa"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
if fetch_tweets:
# Current Time
current_time = time.time()
diff = 0
real_time = 0
live_tweet_text = []
live_tweet_date = []
live_tweet_id = []
lt_user_name = []
lt_user_location = []
lt_user_screenname=[]
lt_followers = []
lt_following = []
while(diff < 10):
for tweet in tweepy.Cursor(api.search_tweets,q=hashtag,count=10,lang="en",since="2021-12-11").items():
real_time = time.time()
diff = real_time - current_time
if diff >10:
break
if (not tweet.retweeted) and ('RT @' not in tweet.text):
#print(tweet,"\n\n\n\n\n")
live_tweet_text.append(tweet.text)
live_tweet_date.append(tweet.created_at)
live_tweet_id.append(tweet.id)
lt_user_name.append(tweet.user.name)
lt_user_location.append(tweet.user.location)
lt_user_screenname.append(tweet.user.screen_name)
lt_followers.append(str(tweet.user.followers_count))
lt_following.append(str(tweet.user.friends_count))
live_tweet_feed_dict = {'Tweet ID':live_tweet_id, 'Tweet': live_tweet_text, 'Date & Time': live_tweet_date, 'Username': lt_user_screenname, 'User Full Name': lt_user_name, 'Location': lt_user_location, 'Follower Count': lt_followers, 'Following Count': lt_following}
live_tweet_feed = pd.DataFrame(live_tweet_feed_dict)
st.dataframe(live_tweet_feed)
elif control == 'Time Series Analysis':
### Streamlit code starts here
st.markdown('<h1 style="color:#E07C24;;text-align:center;"><b>Time Series Analysis of Disaster Tweets</b></h1>',unsafe_allow_html=True)
### Time Series Code goes here
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
tweets['type'] = tweets['type'].map({'Need':0, 'Availability':1,'Other':2})
# Get all the labels used in the labelling column
label = tweets.type.unique()
print("Labels:", label)
# Remove label 2 from the list because not required for time series analysis
label = np.delete(label,np.where(label == 2))
print("Labels:", label)
# Add names to the numerical labels
label_name = []
for i in label:
if i == 0:
label_name.append("Need")
elif i == 1:
label_name.append("Availability")
# Choose interval
interval = 30
start_date = "2021-04-01"
# Create Timestamps with intervals
    ds = pd.date_range(start=start_date, periods=interval)
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
import click
import sqlite3
import os
from shutil import copyfile
import pandas as pd
import numpy as np
from .preprocess import uniprot, net, sec, quantification, normalization, meta, query
from .score import monomer, scoring
from .learn import pyprophet, combine
from .quantify import quantitative_matrix, enrichment_test
from .plot import plot_features, check_sqlite_table
from pyprophet.data_handling import transform_threads, transform_pi0_lambda
# import cProfile
@click.group(chain=True)
@click.version_option()
def cli():
"""
SECAT: Size-Exclusion Chromatography Algorithmic Toolkit.
Visit https://github.com/grosenberger/secat for usage instructions and help.
"""
# SECAT import data
@cli.command()
@click.argument('infiles', nargs=-1, type=click.Path(exists=True))
@click.option('--out', 'outfile', required=True, type=click.Path(exists=False), help='Output SECAT file.')
# Reference files
@click.option('--sec', 'secfile', required=True, type=click.Path(exists=True), help='The input SEC calibration file.')
@click.option('--net', 'netfile', required=False, type=click.Path(exists=True), help='Reference binary protein-protein interaction file in STRING-DB or HUPO-PSI MITAB (2.5-2.7) format.')
@click.option('--posnet', 'posnetfile', required=False, type=click.Path(exists=True), help='Reference binary positive protein-protein interaction file in STRING-DB or HUPO-PSI MITAB (2.5-2.7) format.')
@click.option('--negnet', 'negnetfile', required=False, type=click.Path(exists=True), help='Reference binary negative protein-protein interaction file in STRING-DB or HUPO-PSI MITAB (2.5-2.7) format.')
@click.option('--uniprot', 'uniprotfile', required=True, type=click.Path(exists=True), help='Reference molecular weights file in UniProt XML format.')
@click.option('--columns', default=["run_id","sec_id","sec_mw","condition_id","replicate_id","run_id","protein_id","peptide_id","peptide_intensity"], show_default=True, type=(str,str,str,str,str,str,str,str,str), help='Column names for SEC & peptide quantification files')
# Parameters for normalization
@click.option('--normalize/--no-normalize', default=True, show_default=True, help='Normalize quantification data by sliding window cycling LOWESS normalization.')
@click.option('--normalize_window','normalize_window', default=5, show_default=True, type=int, help='Number of SEC fractions per sliding window.')
@click.option('--normalize_padded/--no-normalize_padded', default=True, show_default=True, help='Use padding for first and last SEC fractions.')
# Parameters for decoys
@click.option('--decoy_intensity_bins', 'decoy_intensity_bins', default=1, show_default=True, type=int, help='Number of decoy bins for intensity.')
@click.option('--decoy_left_sec_bins', 'decoy_left_sec_bins', default=1, show_default=True, type=int, help='Number of decoy bins for left SEC fraction.')
@click.option('--decoy_right_sec_bins', 'decoy_right_sec_bins', default=1, show_default=True, type=int, help='Number of decoy bins for right SEC fraction.')
@click.option('--decoy_oversample','decoy_oversample', default=2, show_default=True, type=int, help='Number of iterations to sample decoys.')
@click.option('--decoy_subsample/--no-decoy_subsample', default=False, show_default=True, help='Whether decoys should be subsampled to be approximately of similar number as targets.')
@click.option('--decoy_exclude/--no-decoy_exclude', default=True, show_default=True, help='Whether decoy interactions also covered by targets should be excluded.')
@click.option('--min_interaction_confidence', 'min_interaction_confidence', default=0.0, show_default=True, type=float, help='Minimum interaction confidence for prior information from network.')
@click.option('--interaction_confidence_bins', 'interaction_confidence_bins', default=100, show_default=True, type=int, help='Number of interaction confidence bins for grouped error rate estimation.')
@click.option('--interaction_confidence_quantile/--no-interaction_confidence_quantile', default=True, show_default=True, help='Whether interaction confidence bins should be grouped by quantiles.')
def preprocess(infiles, outfile, secfile, netfile, posnetfile, negnetfile, uniprotfile, columns, normalize, normalize_window, normalize_padded, decoy_intensity_bins, decoy_left_sec_bins, decoy_right_sec_bins, decoy_oversample, decoy_subsample, decoy_exclude, min_interaction_confidence, interaction_confidence_bins, interaction_confidence_quantile):
"""
Import and preprocess SEC data.
"""
# Prepare output file
try:
os.remove(outfile)
except OSError:
pass
con = sqlite3.connect(outfile)
# Generate SEC definition table
click.echo("Info: Parsing SEC definition file %s." % secfile)
sec_data = sec(secfile, columns)
sec_data.to_df().to_sql('SEC', con, index=False)
# Generate Peptide quantification table
run_ids = sec_data.to_df()['run_id'].unique() # Extract valid run_ids from SEC definition table
quantification_list = []
for infile in infiles:
click.echo("Info: Parsing peptide quantification file %s." % infile)
quantification_list.append(quantification(infile, columns, run_ids).to_df())
quantification_data = pd.concat(quantification_list)
# Normalize quantitative data
if normalize:
click.echo("Info: Normalizing quantitative data.")
quantification_data = normalization(quantification_data, sec_data.to_df(), normalize_window, normalize_padded, outfile).to_df()
# Store quantification data
quantification_data.to_sql('QUANTIFICATION' ,con, index=False, if_exists='append')
# Generate peptide and protein meta data over all conditions and replicates
click.echo("Info: Generating peptide and protein meta data.")
meta_data = meta(quantification_data, sec_data.to_df(), decoy_intensity_bins, decoy_left_sec_bins, decoy_right_sec_bins)
meta_data.peptide_meta.to_sql('PEPTIDE_META', con, index=False)
meta_data.protein_meta.to_sql('PROTEIN_META', con, index=False)
# Generate UniProt table
click.echo("Info: Parsing UniProt XML file %s." % uniprotfile)
uniprot_data = uniprot(uniprotfile)
uniprot_data.to_df().to_sql('PROTEIN', con, index=False)
# Generate Network table
if netfile != None:
click.echo("Info: Parsing network file %s." % netfile)
else:
click.echo("Info: No reference network file was provided.")
net_data = net(netfile, uniprot_data, meta_data)
net_data.to_df().to_sql('NETWORK', con, index=False)
# Generate Positive Network table
if posnetfile != None:
click.echo("Info: Parsing positive network file %s." % posnetfile)
posnet_data = net(posnetfile, uniprot_data, meta_data)
posnet_data.to_df().to_sql('POSITIVE_NETWORK', con, index=False)
else:
posnet_data = None
# Generate Negative Network table
if negnetfile != None:
click.echo("Info: Parsing negative network file %s." % negnetfile)
negnet_data = net(negnetfile, uniprot_data, meta_data)
negnet_data.to_df().to_sql('NEGATIVE_NETWORK', con, index=False)
else:
negnet_data = None
# Generate interaction query data
click.echo("Info: Generating interaction query data.")
query_data = query(net_data, posnet_data, negnet_data, meta_data.protein_meta, min_interaction_confidence, interaction_confidence_bins, interaction_confidence_quantile, decoy_oversample, decoy_subsample, decoy_exclude)
query_data.to_df().to_sql('QUERY', con, index=False)
# Remove any entries that are not necessary (proteins not covered by LC-MS/MS data)
con.execute('DELETE FROM PROTEIN WHERE protein_id NOT IN (SELECT DISTINCT(protein_id) as protein_id FROM QUANTIFICATION);')
con.execute('DELETE FROM NETWORK WHERE bait_id NOT IN (SELECT DISTINCT(protein_id) as protein_id FROM QUANTIFICATION) OR prey_id NOT IN (SELECT DISTINCT(protein_id) as protein_id FROM QUANTIFICATION);')
con.execute('DELETE FROM SEC WHERE run_id NOT IN (SELECT DISTINCT(run_id) as run_id FROM QUANTIFICATION);')
con.execute('DELETE FROM QUERY WHERE bait_id NOT IN (SELECT DISTINCT(protein_id) as protein_id FROM QUANTIFICATION) OR prey_id NOT IN (SELECT DISTINCT(protein_id) as protein_id FROM QUANTIFICATION);')
# Add indices
con.execute('CREATE INDEX IF NOT EXISTS idx_protein_protein_id ON PROTEIN (protein_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_network_bait_id ON NETWORK (bait_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_network_prey_id ON NETWORK (prey_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_network_bait_id_prey_id ON NETWORK (bait_id, prey_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_quantification_run_id ON QUANTIFICATION (run_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_quantification_protein_id ON QUANTIFICATION (protein_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_quantification_peptide_id ON QUANTIFICATION (peptide_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_peptide_meta_peptide_id ON PEPTIDE_META (peptide_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_protein_meta_protein_id ON PROTEIN_META (protein_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_query_bait_id ON QUERY (bait_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_query_prey_id ON QUERY (prey_id);')
con.execute('CREATE INDEX IF NOT EXISTS idx_query_bait_id_prey_id ON QUERY (bait_id, prey_id);')
# con.execute('VACUUM;')
con.commit()
# Close connection to file
con.close()
click.echo("Info: Data successfully preprocessed and stored in %s." % outfile)
# SECAT score features
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Input SECAT file.')
@click.option('--out', 'outfile', required=False, type=click.Path(exists=False), help='Output SECAT file.')
@click.option('--monomer_threshold_factor', 'monomer_threshold_factor', default=2.0, show_default=True, type=float, help='Factor threshold to consider a feature a complex rather than a monomer.')
@click.option('--minimum_peptides', 'minimum_peptides', default=1, show_default=True, type=int, help='Minimum number of peptides required to score an interaction.')
@click.option('--maximum_peptides', 'maximum_peptides', default=3, show_default=True, type=int, help='Maximum number of peptides used to score an interaction.')
@click.option('--peakpicking', default='none', show_default=True, type=click.Choice(['none', 'detrend_zero', 'detrend_drop', 'localmax_conditions', 'localmax_replicates']), help='Either "none", "detrend_zero", "detrend_drop", "localmax_conditions" or "localmax_replicates"; the method for peak-picking of the peptide chromatograms. detrend_drop averages over all fractions with peptides; detrend_zero averages over all fractions (less aggressive). localmax_conditions averages peak-picking over replicates of the same conditions; localmax_replicates conducts peak-picking for all samples separately.')
@click.option('--chunck_size', 'chunck_size', default=50000, show_default=True, type=int, help='Chunk size for processing.')
@click.option('--threads', default=1, show_default=True, type=int, help='Number of threads used for parallel processing. -1 means all available CPUs.', callback=transform_threads)
def score(infile, outfile, monomer_threshold_factor, minimum_peptides, maximum_peptides, peakpicking, chunck_size, threads):
"""
Score interaction features in SEC data.
"""
# Define outfile
if outfile is None:
outfile = infile
else:
copyfile(infile, outfile)
outfile = outfile
# Find monomer thresholds
click.echo("Info: Detect monomers.")
monomer_data = monomer(outfile, monomer_threshold_factor)
con = sqlite3.connect(outfile)
monomer_data.df.to_sql('MONOMER', con, index=False, if_exists='replace')
con.close()
# Signal processing
click.echo("Info: Signal processing.")
# Drop features if they already exist
con = sqlite3.connect(outfile)
c = con.cursor()
c.execute('DROP TABLE IF EXISTS FEATURE;')
con.close()
scoring(outfile, chunck_size, threads, minimum_peptides, maximum_peptides, peakpicking)
# cProfile.runctx('scoring(outfile, chunck_size, minimum_peptides, maximum_peptides, peakpicking)', globals(), locals = {'outfile': outfile, 'chunck_size': chunck_size, 'minimum_peptides': minimum_peptides, 'maximum_peptides': maximum_peptides, 'peakpicking': peakpicking}, filename="score_performance.cprof")
# SECAT learn features
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Input SECAT file.')
@click.option('--out', 'outfile', required=False, type=click.Path(exists=False), help='Output SECAT file.')
@click.option('--apply_model', 'apply_model', required=False, type=click.Path(exists=False), help='Apply pretrained SECAT model')
# Prefiltering
@click.option('--minimum_abundance_ratio', 'minimum_abundance_ratio', default=0.1, show_default=True, type=float, help='Minimum abundance ratio required to score an interaction.')
@click.option('--maximum_sec_shift', 'maximum_sec_shift', default=10, show_default=True, type=float, help='Maximum lag in SEC units between interactions and subunits.')
@click.option('--cb_decoys/--no-cb_decoys', default=False, show_default=True, help='Use only decoys from same confidence bin instead of full set for learning.')
# Semi-supervised learning
@click.option('--xeval_fraction', default=0.8, show_default=True, type=float, help='Data fraction used for cross-validation of semi-supervised learning step.')
@click.option('--xeval_num_iter', default=3, show_default=True, type=int, help='Number of iterations for cross-validation of semi-supervised learning step.')
@click.option('--ss_initial_fdr', default=0.1, show_default=True, type=float, help='Initial FDR cutoff for best scoring targets.')
@click.option('--ss_iteration_fdr', default=0.05, show_default=True, type=float, help='Iteration FDR cutoff for best scoring targets.')
@click.option('--ss_num_iter', default=10, show_default=True, type=int, help='Number of iterations for semi-supervised learning step.')
@click.option('--xgb_autotune/--no-xgb_autotune', default=False, show_default=True, help='Autotune hyperparameters after semi-supervised learning.')
# Statistics
@click.option('--parametric/--no-parametric', default=False, show_default=True, help='Do parametric estimation of p-values.')
@click.option('--pfdr/--no-pfdr', default=False, show_default=True, help='Compute positive false discovery rate (pFDR) instead of FDR.')
@click.option('--pi0_lambda', default=[0.01,0.5,0.01], show_default=True, type=(float, float, float), help='Use non-parametric estimation of p-values. Either use <START END STEPS>, e.g. 0.1, 1.0, 0.1 or set to fixed value, e.g. 0.4, 0, 0.', callback=transform_pi0_lambda)
@click.option('--pi0_method', default='bootstrap', show_default=True, type=click.Choice(['smoother', 'bootstrap']), help='Either "smoother" or "bootstrap"; the method for automatically choosing the tuning parameter in the estimation of pi_0, the proportion of true null hypotheses.')
@click.option('--pi0_smooth_df', default=3, show_default=True, type=int, help='Number of degrees-of-freedom to use when estimating pi_0 with a smoother.')
@click.option('--pi0_smooth_log_pi0/--no-pi0_smooth_log_pi0', default=False, show_default=True, help='If True and pi0_method = "smoother", pi0 will be estimated by applying a smoother to a scatterplot of log(pi0) estimates against the tuning parameter lambda.')
@click.option('--lfdr_truncate/--no-lfdr_truncate', show_default=True, default=True, help='If True, local FDR values >1 are set to 1.')
@click.option('--lfdr_monotone/--no-lfdr_monotone', show_default=True, default=True, help='If True, local FDR values are non-decreasing with increasing p-values.')
@click.option('--lfdr_transformation', default='probit', show_default=True, type=click.Choice(['probit', 'logit']), help='Either a "probit" or "logit" transformation is applied to the p-values so that a local FDR estimate can be formed that does not involve edge effects of the [0,1] interval in which the p-values lie.')
@click.option('--lfdr_adj', default=1.5, show_default=True, type=float, help='Numeric value that is applied as a multiple of the smoothing bandwidth used in the density estimation.')
@click.option('--lfdr_eps', default=np.power(10.0,-8), show_default=True, type=float, help='Numeric value that is threshold for the tails of the empirical p-value distribution.')
@click.option('--plot_reports/--no-plot_reports', default=False, show_default=True, help='Plot reports for all confidence bins.')
@click.option('--threads', default=1, show_default=True, type=int, help='Number of threads used for parallel processing. -1 means all available CPUs.', callback=transform_threads)
@click.option('--test/--no-test', default=False, show_default=True, help='Run in test mode with fixed seed to ensure reproducibility.')
def learn(infile, outfile, apply_model, minimum_abundance_ratio, maximum_sec_shift, cb_decoys, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, xgb_autotune, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, plot_reports, threads, test):
"""
Learn true/false interaction features in SEC data.
"""
# Define outfile
if outfile is None:
outfile = infile
else:
copyfile(infile, outfile)
outfile = outfile
# Run PyProphet training
click.echo("Info: Running PyProphet.")
# Drop feature scores if they already exist
con = sqlite3.connect(outfile)
c = con.cursor()
c.execute('DROP TABLE IF EXISTS FEATURE_SCORED;')
con.close()
pyprophet(outfile, apply_model, minimum_abundance_ratio, maximum_sec_shift, cb_decoys, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, xgb_autotune, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, plot_reports, threads, test)
# Combine all replicates
click.echo("Info: Combine evidence across replicate runs.")
combined_data = combine(outfile, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, pfdr)
con = sqlite3.connect(outfile)
combined_data.df.to_sql('FEATURE_SCORED_COMBINED', con, index=False, if_exists='replace')
con.close()
# SECAT quantify features
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Input SECAT file.')
@click.option('--out', 'outfile', required=False, type=click.Path(exists=False), help='Output SECAT file.')
@click.option('--control_condition', default="center", type=str, help='Specify control condition identifier. Setting this parameter to "center" will compare all conditions against all and use the mean as reference for quantification.')
@click.option('--paired/--no-paired', default=False, show_default=True, help='Whether replicates should be paired, e.g. replicates 1 of conditions A & B were measured with heavy and light SILAC labels as part of the same runs.')
@click.option('--maximum_interaction_qvalue', default=0.05, show_default=True, type=float, help='Maximum q-value to consider interactions for quantification.')
@click.option('--min_abs_log2fx', default=1.0, show_default=True, type=float, help='Minimum absolute log2 fold-change for integrated nodes.')
@click.option('--minimum_peptides', 'minimum_peptides', default=1, show_default=True, type=int, help='Minimum number of peptides required to quantify an interaction.')
@click.option('--maximum_peptides', 'maximum_peptides', default=3, show_default=True, type=int, help='Maximum number of peptides used to quantify an interaction.')
@click.option('--missing_peptides', 'missing_peptides', default="zero", type=str, help='Whether missing peptide abundances should be set to 0 ("zero") or dropped ("drop") for fold change computation.')
@click.option('--peptide_log2fx/--no-peptide_log2fx', default=True, show_default=True, help='Whether peptide-level log2fx should be computed instead of protein-level. Protein-level is more robust if measured peptides are variable between conditions or replicates.')
@click.option('--threads', default=1, show_default=True, type=int, help='Number of threads used for parallel processing. -1 means all available CPUs.', callback=transform_threads)
def quantify(infile, outfile, control_condition, paired, maximum_interaction_qvalue, min_abs_log2fx, minimum_peptides, maximum_peptides, missing_peptides, peptide_log2fx, threads):
"""
Quantify protein and interaction features in SEC data.
"""
# Define outfile
if outfile is None:
outfile = infile
else:
copyfile(infile, outfile)
outfile = outfile
click.echo("Info: Prepare quantitative matrices.")
qm = quantitative_matrix(outfile, maximum_interaction_qvalue, minimum_peptides, maximum_peptides)
con = sqlite3.connect(outfile)
qm.monomer_peptide.to_sql('MONOMER_QM', con, index=False, if_exists='replace')
qm.complex_peptide.to_sql('COMPLEX_QM', con, index=False, if_exists='replace')
con.close()
click.echo("Info: Assess differential features.")
et = enrichment_test(outfile, control_condition, paired, min_abs_log2fx, missing_peptides, peptide_log2fx, threads)
con = sqlite3.connect(outfile)
et.edge.to_sql('EDGE', con, index=False, if_exists='replace')
et.edge_level.to_sql('EDGE_LEVEL', con, index=False, if_exists='replace')
et.node.to_sql('NODE', con, index=False, if_exists='replace')
et.node_level.to_sql('NODE_LEVEL', con, index=False, if_exists='replace')
et.protein_level.to_sql('PROTEIN_LEVEL', con, index=False, if_exists='replace')
con.close()
# SECAT export features
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Input SECAT file.')
@click.option('--maximum_interaction_qvalue', default=0.05, show_default=True, type=float, help='Maximum q-value to consider interactions for quantification.')
def export(infile, maximum_interaction_qvalue):
"""
Export SECAT results.
"""
outfile_interactions = os.path.splitext(infile)[0] + "_interactions.csv"
outfile_network = os.path.splitext(infile)[0] + "_network.csv"
outfile_nodes = os.path.splitext(infile)[0] + "_differential_nodes.csv"
outfile_nodes_level = os.path.splitext(infile)[0] + "_differential_nodes_level.csv"
outfile_edges = os.path.splitext(infile)[0] + "_differential_edges.csv"
outfile_edges_level = os.path.splitext(infile)[0] + "_differential_edges_level.csv"
outfile_proteins_level = os.path.splitext(infile)[0] + "_differential_proteins_level.csv"
con = sqlite3.connect(infile)
if check_sqlite_table(con, 'FEATURE_SCORED_COMBINED'):
interaction_data = pd.read_sql('SELECT DISTINCT bait_id, prey_id FROM FEATURE_SCORED_COMBINED WHERE decoy == 0 and qvalue <= %s;' % maximum_interaction_qvalue , con)
interaction_data.to_csv(outfile_interactions, index=False)
if check_sqlite_table(con, 'FEATURE_SCORED_COMBINED') and check_sqlite_table(con, 'MONOMER_QM'):
network_data = pd.read_sql('SELECT DISTINCT bait_id, prey_id FROM FEATURE_SCORED_COMBINED WHERE decoy == 0 and qvalue <= %s UNION SELECT DISTINCT bait_id, prey_id FROM MONOMER_QM;' % maximum_interaction_qvalue , con)
network_data.to_csv(outfile_network, index=False)
if check_sqlite_table(con, 'NODE'):
node_data = pd.read_sql('SELECT * FROM NODE;' , con)
node_data.sort_values(by=['pvalue']).to_csv(outfile_nodes, index=False)
if check_sqlite_table(con, 'NODE_LEVEL'):
node_level_data = pd.read_sql('SELECT * FROM NODE_LEVEL;' , con)
node_level_data.sort_values(by=['pvalue']).to_csv(outfile_nodes_level, index=False)
if check_sqlite_table(con, 'EDGE'):
edge_data = pd.read_sql('SELECT * FROM EDGE;' , con)
edge_data.sort_values(by=['pvalue']).to_csv(outfile_edges, index=False)
if check_sqlite_table(con, 'EDGE_LEVEL'):
edge_level_data = pd.read_sql('SELECT * FROM EDGE_LEVEL;' , con)
edge_level_data.sort_values(by=['pvalue']).to_csv(outfile_edges_level, index=False)
if check_sqlite_table(con, 'PROTEIN_LEVEL'):
protein_level_data = pd.read_sql('SELECT * FROM PROTEIN_LEVEL;' , con)
protein_level_data.sort_values(by=['pvalue']).to_csv(outfile_proteins_level, index=False)
con.close()
# SECAT plot chromatograms
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Input SECAT file.')
@click.option('--level', default='bait', show_default=True, type=click.Choice(['bait', 'interaction']), help='Plot either all interactions of bait proteins or individual interactions')
@click.option('--id', required=False, type=str, help='Plot specific UniProt bait_id (Q10000) or interaction_id (Q10000_P10000)')
@click.option('--max_qvalue', default=0.01, show_default=True, type=float, help='Maximum q-value to plot baits or interactions.')
@click.option('--min_abs_log2fx', default=1.0, show_default=True, type=float, help='Minimum absolute log2 fold-change for integrated nodes.')
@click.option('--mode', default='quantitative', show_default=True, type=click.Choice(['quantitative', 'detection']), help='Select mode to order interaction plots by.')
@click.option('--combined/--no-combined', default=False, show_default=True, help='Select interactions and baits according to combined q-values.')
@click.option('--peptide_rank', default=6, show_default=True, type=int, help='Number of most intense peptides to plot.')
def plot(infile, level, id, max_qvalue, min_abs_log2fx, mode, combined, peptide_rank):
"""
Plot SECAT results
"""
pf = plot_features(infile, level, id, max_qvalue, min_abs_log2fx, mode, combined, peptide_rank)
# SECAT print statistics
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Input SECAT file.')
@click.option('--min_abs_log2fx', default=1.0, show_default=True, type=float, help='Minimum absolute log2 fold-change for integrated nodes.')
def statistics(infile, min_abs_log2fx):
"""
Print SECAT statistics
"""
con = sqlite3.connect(infile)
if check_sqlite_table(con, 'QUANTIFICATION') and check_sqlite_table(con, 'SEC') and check_sqlite_table(con, 'PROTEIN'):
click.echo("Protein information")
click.echo(10*"-")
pepprot = pd.read_sql('SELECT * FROM QUANTIFICATION INNER JOIN SEC ON QUANTIFICATION.run_id = SEC.run_id WHERE protein_id IN (SELECT DISTINCT protein_id FROM PROTEIN);', con)
# Author: <NAME>
# Created on: April 2020
# Last modified on: Sep 2020
"""
This script pulls COVID-19 data and creates interactive plots of COVID-19 cases around the world.
"""
#!pip install pycountry_convert
#!pip install requests
#!pip install pandas
#!pip install plotly
import requests as r
import pandas as pd
import plotly.io as pio
import pycountry_convert as pc
import plotly.offline as offline
import plotly.graph_objs as go
import numpy as np
from ipywidgets import widgets
from IPython.display import display, Javascript, Markdown, HTML, clear_output
from ipywidgets import interact, interact_manual, widgets, Layout, VBox, HBox, Button,fixed,interactive
def extract_latest(final_df):
# This function gets the latest totals for each country
conf_dic = {}
latest_arr = []
cont_code_arr = []
country_arr = []
for country in final_df['country']:
latest = float(final_df[final_df['country']==country]['latest'].sum())
cont_code = final_df[final_df['country']==country]['continent code'].unique()[0]
latest_arr.append(latest)
cont_code_arr.append(cont_code)
country_arr.append(country)
conf_dic['country'] = country_arr
conf_dic['continent code'] = cont_code_arr
conf_dic['latest'] = latest_arr
conf_df = pd.DataFrame(conf_dic)
return conf_df
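# Illustrative call (hypothetical data shaped like final_df; note the loop above runs
# once per *row*, so countries appearing on several rows are repeated in the output):
#   demo = pd.DataFrame({'country': ['Italy', 'Italy', 'Nepal'],
#                        'continent code': ['EU', 'EU', 'AS'],
#                        'latest': [5, 7, 3]})
#   extract_latest(demo)  # Italy rows both carry 12.0, the Nepal row carries 3.0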
def generate_levels(df,case_type):
# The sunburst plot requires weights (values), labels, and parent (region, or World)
# We build the corresponding table here
# Inspired and adapted from https://pypi.org/project/world-bank-data/
columns = ['labels','parents', 'values']
# Build the levels
# Level 1 - Countries
level1 = df.copy()
# Rename columns
level1.columns = columns
# Add a text column - format values column
level1['text'] = level1['values'].apply(lambda pop:' ' + str(case_type)+ ' Cases: {:,.0f}'.format(pop))
level1['World total'] = level1['values'].sum()
# Create level 2 - Continents
#Group by continent code
level2 = level1.groupby(['parents']).values.sum().reset_index()[['parents', 'parents', 'values']]
# Rename columns
level2.columns = columns
level2['parents'] = 'World'
# move value to text for this level
level2['text'] = level2['values'].apply(lambda pop: ' ' + str(case_type)+ ' Cases: {:,.0f}'.format(pop))
## Create level 3 - world total as of latest date
level3 = pd.DataFrame({'parents': ['World'], 'labels': ['World'],
'values': [level1.groupby("parents").sum().sum()[0]], 'text':['{:,.0f}'.format(level1.groupby("parents").sum().sum()[0])]})
#Create master dataframe with all levels
all_levels = pd.concat([level1,level2], axis=0,sort=True)
return all_levels
def plot_sunburst(df,case_type):
last_date = pd.to_datetime('today').date()
fig = offline.iplot(dict(
data=[dict(type='sunburst', hoverinfo='text', **df,name='Overview')],
layout=dict(title='COVID-19' + ' ' + str(case_type) + ' Cases as of ' + str(last_date),
width=800,height=800,insidetextorientation='radial')),validate=False)
return fig
# Define a function to drop the 'history.' prefix
# Create function drop_prefix
def drop_prefix(self, prefix):
self.columns = self.columns.str.lstrip(prefix)
return self
# Call function
pd.core.frame.DataFrame.drop_prefix = drop_prefix
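# After the patch above, any DataFrame exposes drop_prefix, e.g.
#   flat_df.drop_prefix('history.')  # 'history.3/1/20' -> '3/1/20'
# Caveat: str.lstrip strips a *character set* rather than a literal prefix, so this
# only behaves as intended when no column name begins with one of those characters.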
# Define a function which removes the 'history.' prefix and orders the date columns in ascending order
def order_dates(flat_df):
# Drop prefix
flat_df.drop_prefix('history.')
flat_df.drop_prefix("coordinates.")
# Isolate date columns
flat_df.iloc[:,6:].columns = pd.to_datetime(flat_df.iloc[:,6:].columns)
# Transform to datetime format
sub = flat_df.iloc[:,6:]
sub.columns = pd.to_datetime(sub.columns)
# Sort
sub2 = sub.reindex(sorted(sub.columns), axis=1)
sub3 = flat_df.reindex(sorted(flat_df.columns),axis=1).iloc[:,-5:]
# Concatenate
final = pd.concat([sub2, sub3], axis=1, sort=False)
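# Sketch of the result: after order_dates(flat_df), `final` holds the date columns in
# chronological order followed by the last five alphabetically-sorted non-date columns
# (presumably the per-location metadata).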
import bs4 as bs
import urllib.request
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 10000)
pd.set_option('display.width', 1000)
source = urllib.request.urlopen('https://www.geonames.org/countries/').read()
soup = bs.BeautifulSoup(source, 'lxml')
table = soup.find('table', id='countries')
table_rows = table.find_all('tr')
rows = []
for tr in table_rows:
td = tr.find_all('td')
row = [i.text for i in td]
rows.append(row)
df = pd.DataFrame(rows, columns=['ISO2', 'ISO3', 'ISO3n', 'fips', 'country', 'capital', 'area', 'pop', 'continent'])
df = df.iloc[1:, :]
# keep columns to merge with datasets
merge_pop = df[['ISO2', 'ISO3', 'country']]
# Namibia cities
namibia = pd.read_csv('na.csv')
namibia = namibia.rename(columns={'city': 'asciiname', 'lat': 'latitude', 'lng': 'longitude',
'country': 'countries'})
namibia = namibia.drop(['iso2', 'admin_name', 'capital', 'population_proper'], axis=1)
namibia[['population', 'latitude', 'longitude']] = namibia[['population', 'latitude', 'longitude']].astype(str)
# read cities.csv
# error: https://stackoverflow.com/questions/18171739/unicodedecodeerror-when-reading-csv-file-in-pandas-with-python
cities = pd.read_csv('cities15000.csv', encoding='latin')
merge_cities = cities[['asciiname', 'latitude', 'longitude', 'country code', 'population']]
# read all_countries.csv
hfi = pd.read_csv('./../hfi_cc_2021.csv')
from __future__ import division, print_function
import numpy as np
import pandas as pd
def get_kfold_split(N, k=4):
"""
Create groups used for k-fold cross validation.
Parameters
----------
N : number of samples to split
k : number of groups used for cross validation
Returns
-------
List of (index_train, index_test) pairs
"""
np.random.seed(2017)
idx = np.random.permutation(N)
index_pairs = [(np.ones(N).astype(np.bool),
np.zeros(N).astype(np.bool))
for _ in range(k)]
for i, fold_idx in enumerate(np.array_split(idx, k)):
index_pairs[i][0][fold_idx] = 0
index_pairs[i][1][fold_idx] = 1
return index_pairs
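# Example (hypothetical feature matrix X): four train/test boolean-mask pairs
#   folds = get_kfold_split(X.shape[0], k=4)
#   train_mask, test_mask = folds[0]
#   X_train, X_test = X[train_mask], X[test_mask]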
def benchmark(clf_factory, X, Y, clf_params_dict=None, k=4, verbose=False):
"""
benchmark a classifier on preprocessed data.
Parameters
----------
clf_factory :
Function which returns a classifier. Classifiers implement
a `fit` method and a `predict` method. The parameters
clf_params will be passed to clf_factory.
X : NxM matrix of features
Y : NxL matrix of binary values. Y[i,j] indicates whether or
not the j'th tag applies to the i'th article.
clf_params_dict :
dictionary of parameters passed to the classifier factory.
If None, no parameters are passed.
k : how many folds to use for cross validation
verbose : Should status be printed?
"""
if clf_params_dict is None:
clf_params_dict = {}
L = Y.shape[1]
fold_indexes = get_kfold_split(X.shape[0], k)
acc = np.zeros(k)
tpr = np.zeros((k, L))
fpr = np.zeros((k, L))
ppv = np.zeros((k, L))
clfs = []
for i, (idx_trn, idx_tst) in enumerate(fold_indexes):
if verbose:
print('step {} of {}...'.format(i, k), end='')
clf = clf_factory(**clf_params_dict)
x_trn = X[idx_trn, :]
y_trn = Y[idx_trn, :]
x_tst = X[idx_tst, :]
y_tst = Y[idx_tst, :]
clf.fit(x_trn, y_trn)
y_hat = clf.predict_proba(x_tst)
y_hat = y_hat > 0.5
y_hat.dtype = np.int8
y_tst.dtype = np.int8
acc[i] = (np.sum(y_tst == y_hat)) / float(y_tst.size)
for j in range(L):
tpr[i, j] = np.sum(y_tst[:, j] & y_hat[:, j]) / np.sum(y_tst[:, j])
fpr[i, j] = (np.sum(np.logical_not(y_tst[:, j]) & y_hat[:, j])
/ np.sum(np.logical_not(y_tst[:, j])))
ppv[i, j] = np.sum(y_tst[:, j] & y_hat[:, j]) / np.sum(y_hat[:, j])
clfs.append(clf)
if verbose:
print('done')
return {'acc': acc, 'tpr': tpr, 'fpr': fpr, 'ppv': ppv, 'clfs': clfs}
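# Illustrative call (assumes a scikit-learn style classifier whose predict_proba returns
# an N x L probability matrix, e.g. OneVsRestClassifier; not part of this module):
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.multiclass import OneVsRestClassifier
#   factory = lambda **kw: OneVsRestClassifier(LogisticRegression(**kw))
#   results = benchmark(factory, X, Y, clf_params_dict={'C': 1.0}, k=4, verbose=True)
#   mean_accuracy = results['acc'].mean()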
def predict_articles(clf, vectorizer, df, n=100, seed=1029384756):
np.random.seed(seed)
pd.set_option('display.max_columns', 100)
pd.set_option('display.float_format', lambda x: '%.6f' % x)
random_subset = np.random.choice(np.arange(df.shape[0]),
size=n,
replace=False)
preds = clf.predict_proba(vectorizer.transform(
df.iloc[random_subset, 3].values
))
preds = pd.DataFrame(preds)
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from src.create_initial_states.create_initial_conditions import (
_scale_up_empirical_new_infections,
)
from src.create_initial_states.create_initial_conditions import (
create_group_specific_share_known_cases,
)
from src.create_initial_states.create_initial_infections import (
_add_variant_info_to_infections,
)
from src.create_initial_states.create_initial_infections import (
_calculate_group_infection_probs,
)
@pytest.fixture
def empirical_infections():
start = pd.Timestamp("2020-09-30")
a_day = pd.Timedelta(days=1)
df = pd.DataFrame()
df["date"] = [start + i * a_day for i in range(5)] * 4
df = df.sort_values("date")
df.reset_index(drop=True, inplace=True)
df["county"] = list("AABB") * 5
df["age_group_rki"] = ["young", "old"] * 10
np.random.seed(3984)
df["newly_infected"] = np.random.choice([0, 1], 20)
sr = df.set_index(["date", "county", "age_group_rki"])
return sr
@pytest.fixture
def cases():
ind_tuples = [("A", "young"), ("A", "old"), ("B", "young"), ("B", "old")]
index = pd.MultiIndex.from_tuples(ind_tuples, names=["county", "age_group_rki"])
df = pd.DataFrame(index=index)
df["2020-10-01"] = [1, 0, 0, 1]
df["2020-10-02"] = [1, 1, 0, 0]
df["2020-10-03"] = [1, 0, 0, 1]
return df
@pytest.fixture
def synthetic_data():
df = pd.DataFrame()
df["county"] = list("AABBBBAAA")
df["age_group_rki"] = ["young"] * 4 + ["old"] * 5
return df
def test_calculate_group_infection_probs(synthetic_data, cases):
pop_size = 14
undetected_multiplier = 1.5
res = _calculate_group_infection_probs(
synthetic_data=synthetic_data,
cases=undetected_multiplier * cases,
population_size=pop_size,
)
expected_on_synthetic_data = pd.DataFrame(
index=synthetic_data.index, columns=cases.columns
)
group_shares = np.array([2, 2, 2, 2, 2, 2, 3, 3, 3]) / 9
scaled_up_group_sizes = pop_size * group_shares
p1 = (
undetected_multiplier
* np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])
/ scaled_up_group_sizes
)
p2 = (
undetected_multiplier
* np.array([1, 1, 0, 0, 0, 0, 1, 1, 1])
/ scaled_up_group_sizes
)
p3 = (
undetected_multiplier
* np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])
/ scaled_up_group_sizes
)
expected_on_synthetic_data["2020-10-01"] = p1
expected_on_synthetic_data["2020-10-02"] = p2
expected_on_synthetic_data["2020-10-03"] = p3
expected = expected_on_synthetic_data.loc[[0, 2, 4, 6]]
expected.index = pd.MultiIndex.from_tuples(
[("A", "young"), ("B", "young"), ("B", "old"), ("A", "old")]
)
expected.index.names = ["county", "age_group_rki"]
pdt.assert_frame_equal(res.sort_index(), expected.sort_index())
def test_add_variant_info_to_infections():
df = pd.DataFrame()
dates = [pd.Timestamp("2021-03-14"), pd.Timestamp("2021-03-15")]
"""
To run this flow:
```python forecasting_flow.py --environment=conda run```
"""
from functools import partial
from metaflow import (
Flow,
FlowSpec,
IncludeFile,
Parameter,
batch,
conda,
conda_base,
get_metadata,
parallel_map,
step,
)
from pip_decorator import pip
from forecasting_models import GluonTSModel, KatsModel, NeuralProphetModel, MerlionModel
# this version is used in pre and post processing steps
PANDAS_VERSION = "1.3.3"
# this version is used when conda packages aren't available
PIP_VERSION = "21.3.1"
def run_model(
model_config, wrapper_class, target_index, forecast_steps, train_df, data_freq
):
try:
model = wrapper_class(
model_config, target_index, forecast_steps, data_freq=data_freq
)
model.fit(train_df)
forecast = model.predict(train_df)
forecast["id"] = model_config["id"]
return forecast
except:
print(f"Error with {model_config}")
raise
@conda_base(python="3.8.12")
class ForecastingFlow(FlowSpec):
"""
A flow for benchmarking forecasting libraries.
"""
train_path = Parameter(
"train_path",
help="The path to a DataFrame file for training",
default="https://jgoode.s3.amazonaws.com/ts-datasets/seattle-trail.csv",
)
test_path = Parameter(
"test_path",
help="The path to a DataFrame file for testing",
default=None,
)
date_col = Parameter(
"date_col",
help="Column of the date in the input DataFrame",
default="Date",
)
target_col = Parameter(
"target_col",
help="Column of the target in the input DataFrame",
default="BGT North of NE 70th Total",
)
# data_config_path = Parameter(
# "data_config_path",
# help=
model_config_path = Parameter(
"model_config_path",
help="The path to a model config file",
default="../configs/forecasting/models/default.yaml",
)
forecast_steps = Parameter(
"forecast_steps",
help="The number of steps ahead to forecast",
default=10,
)
@conda(libraries={"pandas": PANDAS_VERSION, "pyyaml": "6.0"})
@step
def start(self):
"""
Start the flow by preprocessing the data.
"""
import pandas as pd
from pprint import pprint
import yaml
# Print the Metaflow metadata provider
print(f"Using metadata provider: {get_metadata()}")
def load_df(path):
df = pd.read_csv(path)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
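# e.g. get_upcast_box(pd.Index, Series([1])) -> Series, because Series takes priority
# over Index; with a plain ndarray vector the original `box` is returned unchanged.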
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2])
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
)
df["b"] = df.b.astype(dtype)
if "args" not in data:
data["args"] = []
if "out_type" in data:
out_type = data["out_type"]
else:
out_type = dtype
exp = data["df"]
df_out = pd.DataFrame(exp)
df_out["b"] = df_out.b.astype(out_type)
df_out.set_index("a", inplace=True)
grpd = df.groupby("a")
t = getattr(grpd, method)(*data["args"])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
import os
import sys
sys.path.insert(0, os.getcwd())
import numpy as np
import pandas as pd
import dreamtools as dt
@np.vectorize
def approximately_equal(x, y, ndigits=0):
return round(x, ndigits) == round(y, ndigits)
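# Minimal sketch (hypothetical values): element-wise equality after rounding to `ndigits`,
#   >>> approximately_equal([1.04, 2.6], [1, 2])
#   array([ True, False])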
def test_gdx_read():
db = dt.Gdx("test.gdx")
assert approximately_equal(db["qY"]["byg", 2010], 191)
assert approximately_equal(db["qI_s"]["IB", "fre", 2010], 4.43) # "IB" should be changed to "iB" in the GDX file.
assert approximately_equal(db["eCx"], 1)
assert db["fp"] == 1.0178
assert all(approximately_equal(db["inf_factor"], db["fp"]**(2010 - db["inf_factor"].index)))
assert db["s"].name == "s_"
assert db.vHh.loc["Net","tot",1970] == 5e-324
assert set(db["vHh"].index.get_level_values("a_")).issubset(set(db["a_"]))
def test_create_set_from_index():
db = dt.GamsPandasDatabase()
t = pd.Index(range(2010, 2026), name="t")
db.create_set("t", t)
assert db["t"].name == "t"
assert all(db["t"] == t)
assert db.symbols["t"].domains_as_strings == ["*"]
assert db.t.domains == ["*"]
db.create_set("tsub", t[5:], domains=["t"])
assert db["tsub"].name == "tsub"
assert all(db["tsub"] == t[5:])
assert db.symbols["tsub"].domains_as_strings == ["t"]
assert db.tsub.domains == ["t"]
s = pd.Index(["services", "goods"], name="s")
st = pd.MultiIndex.from_product([s, t], names=["s", "t"])
db.create_set("st", st)
assert db["st"].name == "st"
assert all(db["st"] == st[:])
assert db.symbols["st"].domains_as_strings == ["s", "t"]
assert db.st.domains == ["s", "t"]
def test_add_parameter_from_dataframe():
db = dt.GamsPandasDatabase()
df = pd.DataFrame()
df["t"] = range(2010, 2026)
df["value"] = 1.3
db.add_parameter_from_dataframe("par", df, add_missing_domains=True)
assert all(db["par"] == 1.3)
assert len(db["par"]) == 16
def test_multiply_added():
db = dt.GamsPandasDatabase()
df = pd.DataFrame([
[2010, "ser", 3],
[2010, "goo", 2],
[2020, "ser", 6],
[2020, "goo", 4],
], columns=["t", "s", "value"])
db.add_parameter_from_dataframe("q", df, add_missing_domains=True)
df = pd.DataFrame([
[2010, 1],
[2020, 1.2],
], columns=["t", "value"])
db.add_parameter_from_dataframe("p", df, add_missing_domains=True)
v = db["p"] * db["q"]
v.name = "v"
db.add_parameter_from_series(v)
assert db["v"][2020, "goo"] == 4.8
def test_add_parameter_from_series():
db = dt.GamsPandasDatabase()
t = pd.Index(range(2010, 2026), name="t")
par = pd.Series(1.4, index=t, name="par")
db.add_parameter_from_series(par, add_missing_domains=True)
assert all(db["par"] == 1.4)
assert len(db["par"]) == 16
ss = pd.Index(["foo"], name="ss")
singleton = pd.Series(1.4, index=ss, name="singleton")
db.add_parameter_from_series(singleton, add_missing_domains=True)
assert db["singleton"]["foo"] == 1.4
assert len(db["singleton"]) == 1
scalar = pd.Series(1.4, name="scalar")
db.add_parameter_from_series(scalar)
assert all(db["scalar"] == 1.4)
assert len(db["scalar"]) == 1
def test_create_variable():
db = dt.GamsPandasDatabase()
db.create_variable("scalar", data=3.2)
assert db.scalar == 3.2
db.create_variable("vector", data=[1, 2], index=pd.Index(["a", "b"], name="ab"), add_missing_domains=True)
assert all(db.vector == [1, 2])
db.create_variable("dataframe",
data=pd.DataFrame([
[2010, "ser", 3],
[2010, "goo", 2],
[2020, "ser", 6],
[2020, "goo", 4],
], columns=["t", "s", "value"]),
add_missing_domains=True
)
db.export("test_export.gdx")
assert dt.Gdx("test_export.gdx")["scalar"] == 3.2
assert all(dt.Gdx("test_export.gdx")["vector"] == [1, 2])
assert all(db.s == ["ser", "goo"])
assert all(db.t == [2010, 2020])
def test_create_parameter():
db = dt.GamsPandasDatabase()
db.create_parameter("scalar", data=3.2)
assert db.scalar == 3.2
db.create_parameter("vector", data=[1, 2], index=pd.Index(["a", "b"], name="ab"), add_missing_domains=True)
assert all(db.vector == [1, 2])
db.create_parameter("dataframe",
data=pd.DataFrame([
[2010, "ser", 3],
[2010, "goo", 2],
[2020, "ser", 6],
[2020, "goo", 4],
], columns=["t", "s", "value"]),
add_missing_domains=True
)
db.export("test_export.gdx")
assert dt.Gdx("test_export.gdx")["scalar"] == 3.2
assert all(dt.Gdx("test_export.gdx")["vector"] == [1, 2])
assert all(db.s == ["ser", "goo"])
assert all(db.t == [2010, 2020])
def test_add_variable_from_series():
db = dt.GamsPandasDatabase()
t = pd.Index(range(2010, 2026), name="t")
var = pd.Series(1.4, index=t, name="var")
db.add_variable_from_series(var, add_missing_domains=True)
assert all(db["var"] == 1.4)
assert len(db["var"]) == 16
def test_add_variable_from_dataframe():
db = dt.GamsPandasDatabase()
df = pd.DataFrame([
[2010, "ser", 3],
[2010, "goo", 2],
[2020, "ser", 6],
[2020, "goo", 4],
], columns=["t", "s", "value"])
db.add_variable_from_dataframe("q", df, add_missing_domains=True)
assert all(db.t == [2010, 2020])
assert all(db.s == ["ser", "goo"])
def test_multiply_with_different_sets():
assert approximately_equal(
sum(dt.Gdx("test.gdx")["qBNP"] * dt.Gdx("test.gdx")["qI"] * dt.Gdx("test.gdx")["qI_s"]),
50730260150
)
def test_export_with_no_changes():
dt.Gdx("test.gdx").export("test_export.gdx", relative_path=True)
assert round(os.stat("test.gdx").st_size, -5) == round(os.stat("test_export.gdx").st_size, -5)
def test_export_variable_with_changes():
db = dt.Gdx("test.gdx")
db["qY"] = db["qY"] * 2
db.export("test_export.gdx", relative_path=True)
old, new = dt.Gdx("test.gdx"), dt.Gdx("test_export.gdx")
assert all(old["qY"] * 2 == new["qY"])
def test_export_scalar_with_changes():
db = dt.Gdx("test.gdx")
db["eCx"] = db["eCx"] * 2
db.export("test_export.gdx", relative_path=True)
old, new = dt.Gdx("test.gdx"), dt.Gdx("test_export.gdx")
assert approximately_equal(old["eCx"] * 2, new["eCx"])
def test_export_set_with_changes():
db = dt.Gdx("test.gdx")
db["s"].texts["tje"] = "New text"
db.export("test_export.gdx", relative_path=True)
assert dt.Gdx("test_export.gdx")["s"].texts["tje"] == "New text"
def test_copy_set():
db = dt.Gdx("test.gdx")
db["alias"] = db["s"]
db["alias"].name = "alias"
index = db["alias"]
domains = ["*" if i in (None, index.name) else i for i in db.get_domains_from_index(index, index.name)]
db.database.add_set_dc(index.name, domains, "")
index = index.copy()
index.domains = domains
db.series[index.name] = index
db.export("test_export.gdx", relative_path=True)
assert all(dt.Gdx("test_export.gdx")["alias"] == db["s"])
def test_export_added_variable():
db = dt.Gdx("test.gdx")
db.create_variable("foo", [db.a, db.t], explanatory_text="Variable added from Python.")
db["foo"] = 42
db.export("test_export.gdx", relative_path=True)
assert all(approximately_equal(dt.Gdx("test_export.gdx")["foo"], 42))
def test_export_NAs():
db = dt.GamsPandasDatabase()
t = db.create_set("t", range(5))
p = db.create_parameter("p", t)
assert len(db["p"]) == 5
assert len(db.symbols["p"]) == 0
db.export("test_export.gdx")
db = dt.Gdx("test_export.gdx")
assert all(pd.isna(db["p"]))
assert len(db["p"]) == 0
assert len(db.symbols["p"]) == 0
def test_detuple():
assert dt.GamsPandasDatabase.detuple("aaa") == "aaa"
assert dt.GamsPandasDatabase.detuple(("aaa",)) == "aaa"
assert list(dt.GamsPandasDatabase.detuple(("aaa", "bbb"))) == ["aaa", "bbb"]
assert dt.GamsPandasDatabase.detuple(1) == 1
assert list(dt.GamsPandasDatabase.detuple([1, 2])) == [1, 2]
def test_create_methods():
# Create empty GamsPandasDatabase and alias creation methods
db = dt.GamsPandasDatabase()
Par, Var, Set = db.create_parameter, db.create_variable, db.create_set
# Create sets from scratch
t = Set("t", range(2000, 2021), "Årstal")
s = Set("s", ["tjenester", "fremstilling"], "Brancher", ["Tjenester", "Fremstilling"])
st = Set("st", [s, t], "Branche x år dummy")
sub = Set("sub", ["tjenester"], "Subset af brancher", domains=["s"])
one2one = Set("one2one", [(2010, 2015), (2011, 2016)], "1 til 1 mapping", domains=["t", "t"])
one2many = Set("one2many",
[("tot", "tjenester"), ("tot", "fremstilling")],
"1 til mange mapping", domains=["*", "s"],
)
assert one2many.name == "one2many"
assert one2many.names == ["*", "s"]
assert one2many.domains == ["*", "s"]
# Create parameters and variables based on zero ore more sets
gq = Par("gq", None, "Produktivitets-vækst", 0.01)
fq = Par("fp", t, "Vækstkorrektionsfaktor", (1 + 0.01)**(t-2010))
d = Par("d", st, "Dummy")
y = Var("y", [s,t], "Produktion")
assert gq == 0.01
assert all(fq.loc[2010:2011] == [1, 1.01])
assert pd.isna(d["tjenester", 2010])
# -*- coding: utf-8 -*-
import pandas as pd
import os
import sys
import numpy as np
# Load datasets
ids = [ name for name in os.listdir(".") if os.path.isdir(name) ]
ids_decap = [s[:-1] if s[-6:] in ["mirnaA","mirnaB"] else s for s in ids]
# Map samples
metadata = pd.read_csv(sys.argv[1])
#!/usr/bin/env python3
import argparse
from datetime import datetime
import dateutil.parser
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from common import *
def to_datetime(date_string):
return dateutil.parser.parse(date_string)
def to_truncdate(datetime_obj):
format = "%Y-%m-%d"
return datetime.strptime(datetime_obj.strftime(format),
format)
def main(args):
contents = read_lastb_db_contents(args.lastb_db_filename)
df = pd.DataFrame.from_records(contents)
import argparse
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
tqdm.pandas()
parser = argparse.ArgumentParser()
parse.add_argument("--mimic_dir", help="The directory contaning all the required MIMIC files (ADMISSIONS, PATIENTS, DIAGNOSES_ICD, PROCEDURES_ICD, NOTEEVENTS).")
parse.add_argument("--save_dir", help="The directory where you want to save the processed files.")
args = parser.parse_args()
# ## Load data and clean admission
raw_adm = pd.read_csv(args.mimic_dir + "ADMISSIONS.csv.gz")
raw_patients = pd.read_csv(args.mimic_dir + 'PATIENTS.csv.gz')
import pandas as pd
import nltk
import re
from nltk.stem import wordnet
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import pos_tag
from sklearn.metrics import pairwise_distances
from nltk import word_tokenize
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
nltk.download('stopwords')
data_frame = pd.read_excel('data/dialog_talk_agent.xlsx')
data_frame.ffill(axis=0, inplace=True)
def text_normalization(text: str) -> str:
text = str(text).lower()
char_text = re.sub(r'[^ a-z]', '', text)
tokens = word_tokenize(char_text)
lemma = wordnet.WordNetLemmatizer()
tags_list = pos_tag(tokens)
lemma_words = []
for token, pos_token in tags_list:
if pos_token.startswith('V'):
pos_val = 'v'
elif pos_token.startswith('J'):
pos_val = 'a'
elif pos_token.startswith('R'):
pos_val = 'r'
else:
pos_val = 'n'
lemma_token = lemma.lemmatize(token, pos_val)
lemma_words.append(lemma_token)
return ' '.join(lemma_words)
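# Illustrative sketch (input/output assumed, not executed): the text is lowercased,
# stripped of non-letters, tokenized and lemmatized with POS-aware WordNet tags, e.g.
#   >>> text_normalization("The cats are running!")
#   'the cat be run'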
data_frame['lemmatized_text'] = data_frame['Context'].apply(text_normalization)
tfidf = TfidfVectorizer()
x_tfidf = tfidf.fit_transform(data_frame['lemmatized_text']).toarray()
df_tfidf = pd.DataFrame(x_tfidf, columns=tfidf.get_feature_names())
def chat_tfidf(text: str) -> str:
lemma = text_normalization(text)
tf = tfidf.transform([lemma]).toarray()
cos = 1 - pairwise_distances(df_tfidf, tf, metric='cosine')
index_val = cos.argmax()
return data_frame['Text Response'].loc[index_val]
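# Illustrative usage (the exact reply depends on the contents of dialog_talk_agent.xlsx,
# so the output shown here is an assumption):
#   >>> chat_tfidf("hello there")
#   'Hi! How can I help you?'  # the canned response with the highest cosine similarity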
def _stopword(text: str) -> str:
tag_list = pos_tag(nltk.word_tokenize(text))
stop = stopwords.words('english')
lemma = wordnet.WordNetLemmatizer()
lemma_word = []
for token, pos_token in tag_list:
if token in stop:
continue
if pos_token.startswith('V'):
pos_val = 'v'
elif pos_token.startswith('J'):
pos_val = 'a'
elif pos_token.startswith('R'):
pos_val = 'r'
else:
pos_val = 'n'
lemma_token = lemma.lemmatize(token, pos_val)
lemma_word.append(lemma_token)
return ' '.join(lemma_word)
cv = CountVectorizer()
X = cv.fit_transform(data_frame['lemmatized_text']).toarray()
features = cv.get_feature_names()
data_frame_bow = pd.DataFrame(X, columns=features)
from __future__ import print_function
from fileinput import filename
import os
import pandas as pd
import pdb
from datetime import timedelta
import datetime
import shutil
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
def make_dir(data_path):
if os.path.exists(data_path) is False:
os.mkdir(data_path)
def check_micu_data_valid(data_time, start_date1, end_date1, start_date2, end_date2):
cond1 = (pd.to_datetime(data_time) - pd.to_datetime(start_date1)).total_seconds() >= 0
cond2 = (pd.to_datetime(end_date1) - pd.to_datetime(data_time)).total_seconds() >= 0
cond3 = False
cond4 = False
if start_date2 != 'nan':
cond3 = (pd.to_datetime(data_time) - pd.to_datetime(start_date2)).total_seconds() >= 0
cond4 = (pd.to_datetime(end_date2) - pd.to_datetime(data_time)).total_seconds() >= 0
import pandas as pd
import itertools
import sys
import time
import multiprocessing
import numpy as np
global num_compl
num_compl = 0
def acc_for_multiplier(RMSEs, mult):
heal_RMSE = RMSEs[0]
ibd_RMSE = RMSEs[1]
delta = heal_RMSE - (ibd_RMSE * mult)
delta = ['Healthy' if i < 0 else 'IBD' for i in delta]
return(delta)
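# Minimal sketch of the labelling rule (hypothetical RMSE arrays):
#   >>> import numpy as np
#   >>> acc_for_multiplier((np.array([1.0, 3.0]), np.array([2.0, 1.0])), 1.0)
#   ['Healthy', 'IBD']  # healthy-model RMSE lower -> 'Healthy', otherwise 'IBD'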
def return_tax_names(ID_to_taxon, profiles):
tax_code = {}
with open(ID_to_taxon) as f:
for line in f.read().splitlines():
temp = line.split()
# tax ID is in the 1st column tax name -- 2nd
tax_code[temp[0]] = temp[1]
profiles.index = [tax_code[x] for x in profiles.index]
return(profiles)
def RMSE_calc(TEMP):
i = TEMP[0]
params = TEMP[1]
global num_compl
num_compl += 1
if num_compl % 100 == 0:
print(str(num_compl) + ' out of ' + str(params.shape[0]))
def profile_triple_SS(profile, models):
SS = []
for i in range(models.shape[0]):
temp = models.iloc[i]
try:
pred = float(temp['Coef_p1']) * profile.loc[temp['Predictor1']] + \
float(temp['Coef_p2']) * profile.loc[temp['Predictor2']] + float(temp['Intercept'])
SS.append((profile.loc[temp['Response']] - pred) ** 2)
except KeyError:
continue
return (SS)
def profile_pair_SS(profile, models):
SS = []
for i in models.index:
temp = models.iloc[i]
try:
pred = temp['Coef_p'] * profile.loc[temp.loc['Predictor']] + temp['Intercept']
SS.append((profile.loc[temp['Response']] - pred) ** 2)
except KeyError:
continue
return (SS)
def RMSE_test_profile(do_models_heal, tr_models_heal, do_models_ibd, tr_models_ibd, profile):
heal_tr_ss = profile_triple_SS(profile, tr_models_heal)
heal_do_ss = profile_pair_SS(profile, do_models_heal)
heal_SS = list(heal_tr_ss) + list(heal_do_ss)
heal_RMSE = (sum(heal_SS) / len(heal_SS)) ** (0.5)
ibd_tr_ss = profile_triple_SS(profile, tr_models_ibd)
ibd_do_ss = profile_pair_SS(profile, do_models_ibd)
ibd_SS = list(ibd_tr_ss) + list(ibd_do_ss)
ibd_RMSE = (sum(ibd_SS) / len(ibd_SS)) ** (0.5)
# delta = heal_RMSE-(ibd_RMSE*multiplier)
# delta = ['Healthy' if i <0 else 'IBD' for i in delta]
return (heal_RMSE, ibd_RMSE)
H_Tt = str(params.loc[i, 'H_Taxon_trim'])
H_Sc = str(params.loc[i, 'H_SparCC_cor'])
H_Sp = str(params.loc[i, 'H_SparCC_pval'])
H_Ta = str(int(params.loc[i, 'H_Tax_adjacency']))
H_Pf = str(params.loc[i, 'H_Pair_fstat'])
#
I_Tt = str(params.loc[i, 'I_Taxon_trim'])
I_Sc = str(params.loc[i, 'I_SparCC_cor'])
I_Sp = str(params.loc[i, 'I_SparCC_pval'])
I_Ta = str(int(params.loc[i, 'I_Tax_adjacency']))
I_Pf = str(params.loc[i, 'I_Pair_fstat'])
# print('H_Tt: ' + str(H_Tt))
# print('H_Sc: ' + str(H_Sc))
# print('H_Sp: ' + str(H_Sp))
# print('H_Ta: ' + str(H_Ta))
# print('H_Pf: ' + str(H_Pf))
# print('I_Tt: ' + str(I_Tt))
# print('I_Sc: ' + str(I_Sc))
# print('I_Sp: ' + str(I_Sp))
# print('I_Ta: ' + str(I_Ta))
# print('I_Pf: ' + str(I_Pf))
template = '{}/trim_{}/pval_{}/dist_{}_cor_{}/fstat_{}'
paths = {k: template.format(x, Tt, Sp, Ta, Sc, Pf)
for k, x, Tt, Sp, Ta, Sc, Pf in
(
('ibd', ibd_folders, I_Tt, I_Sp, I_Ta, I_Sc, I_Pf),
('heal', heal_folders, H_Tt, H_Sp, H_Ta, H_Sc, H_Pf)
)}
p_tax = {k: v for k, v in (
('ibd', ibd_folders + '/trim_%s/tax_code.txt' % I_Tt),
('heal', heal_folders + '/trim_%s/tax_code.txt' % H_Tt)
)}
p_do_models = {k: v for k, v in (
('ibd', paths['ibd'] + '/pair_models_{}.txt'.format(I_Pf)),
('heal', paths['heal'] + '/pair_models_{}.txt'.format(H_Pf))
)}
p_tr_models = {k: v for k, v in (
('ibd', paths['ibd'] + '/triplet_models_{}.txt'.format(I_Pf)),
('heal', paths['heal'] + '/triplet_models_{}.txt'.format(H_Pf))
)}
p_test_profile = {k: v for k, v in (
('ibd', ibd_folders + '/trim_{}/{}.txt'.format(I_Tt, test_or_valid)),
('heal', heal_folders + '/trim_{}/{}.txt'.format(H_Tt, test_or_valid))
)}
try:
with open(p_tr_models['ibd']) as f:
if f.readline() == 'No pair models to base triple models on':
print('No pair models to base triple models on')
params.loc[i, 'Sensitivity'] = -1
params.loc[i, 'Specificity'] = -1
sens = -1
spec = -1
return (i, sens, spec)
with open(p_tr_models['heal']) as f:
if f.readline() == 'No pair models to base triple models on':
print('No pair models to base triple models on')
params.loc[i, 'Sensitivity'] = -1
params.loc[i, 'Specificity'] = -1
sens = -1
spec = -1
return (i, sens, spec)
do_models = {k: pd.read_table(v, header=0, index_col=None, sep='\t', engine="python") for k, v in p_do_models.items()}
tr_models = {k: pd.read_table(v, header=0, index_col=None, sep='\t', engine="python") for k, v in p_tr_models.items()}
test_profile = {k: pd.read_table(v, header=0, index_col=0, sep='\t', engine="python") for k, v in p_test_profile.items()}
# -*- coding: utf-8 -*-
import datetime as dt, IPython, pandas as pd, pyarrow as pa, pytest, requests, unittest
from builtins import object
from common import NoAuthTestCase
import graphistry
from mock import patch
triangleEdges = pd.DataFrame({'src': ['a', 'b', 'c'], 'dst': ['b', 'c', 'a']})
triangleNodes = pd.DataFrame({'id': ['a', 'b', 'c'], 'a1': [1, 2, 3], 'a2': ['red', 'blue', 'green']})
triangleNodesRich = pd.DataFrame({
'id': ['a', 'b', 'c'],
'a1': [1, 2, 3],
'a2': ['red', 'blue', 'green'],
'a3': [True, False, False],
'a4': [0.5, 1.5, 1000.3],
'a5': [dt.datetime.fromtimestamp(x) for x in [1440643875, 1440644191, 1440645638]],
'a6': [u'æski ēˈmōjē', u'😋', 's']
})
squareEvil = pd.DataFrame({
'src': [0,1,2,3],
'dst': [1,2,3,0],
'colors': [1, 1, 2, 2],
'list_int': [ [1], [2, 3], [4], []],
'list_str': [ ['x'], ['1', '2'], ['y'], []],
'list_bool': [ [True], [True, False], [False], []],
'list_date_str': [ ['2018-01-01 00:00:00'], ['2018-01-02 00:00:00', '2018-01-03 00:00:00'], ['2018-01-05 00:00:00'], []],
'list_date': [ [pd.Timestamp('2018-01-05')], [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')], [], []],
'list_mixed': [ [1], ['1', '2'], [False, None], []],
'bool': [True, False, True, True],
'char': ['a', 'b', 'c', 'd'],
'str': ['a', 'b', 'c', 'd'],
'ustr': [u'a', u'b', u'c', u'd'],
'emoji': ['😋', '😋😋', '😋', '😋'],
'int': [0, 1, 2, 3],
'num': [0.5, 1.5, 2.5, 3.5],
'date_str': ['2018-01-01 00:00:00', '2018-01-02 00:00:00', '2018-01-03 00:00:00', '2018-01-05 00:00:00'],
## API 1 BUG: Try with https://github.com/graphistry/pygraphistry/pull/126
'date': [dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1)],
'time': [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')],
## API 2 BUG: Need timedelta in https://github.com/graphistry/pygraphistry/blob/master/graphistry/vgraph.py#L108
'delta': [pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day')],
from collections import deque
import jax
import numpy as np
import pandas as pd
from skfda import FDataGrid
from skfda.representation.basis import Fourier
from tensorly.decomposition import tucker
from tensorly.tenalg import mode_dot
from finger_sense.utility import KL_divergence_normal, normalize
class Processor:
def __init__(self, dirs, latent_dim, n_basis, model_name='Gaussian'):
self.latent_dim = latent_dim
self.model_name = model_name
self.basis = Fourier([0, 2 * np.pi], n_basis=n_basis, period=1)
self.jacobian = jax.jacfwd(KL_divergence_normal, 0)
self.sensory_memory = deque(maxlen=2*n_basis-1)
self.init_model(dirs)
def init_model(self, dirs):
'''
Load prior knowledge and initialize the perception model
...
Parameters
----------
dirs : list of strings
Directories of core, factors
'''
if None in dirs.values():
self.core = None
self.factors = None
self.features = None
else:
self.core = pd.read_csv(dirs['core_dir'])
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(rng, expected)
import math
import statistics as stat
import pandas as pd
from nearest_neighbor import Neighborhood
from calculations import mean_square_error
def user_read(path):
user_frame = pd.DataFrame()
with open(path, "r") as file:
for line in file:
user, movie, rating, time = line.split()
if user not in user_frame.index:
row = pd.Series(name=user, dtype="float64")
user_frame = user_frame.append(row)
if movie not in user_frame.columns:
user_frame.insert(0, movie, float("nan"))
user_frame.loc[user][movie] = rating
user_frame = user_frame.reindex([str(i) for i in range(1, max(int(c) for c in user_frame.columns) + 1)], axis=1)
return user_frame
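# Illustrative sketch: each line of the MovieLens-style file is "user movie rating timestamp",
# and the returned frame is users x movies with NaN for unrated titles (path and IDs assumed):
#   >>> ratings = user_read("data/u1-base.base")
#   >>> ratings.loc["1", "242"]  # rating user 1 gave movie 242, if present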
if __name__ == "__main__":
# Read in train and test data
train_data = user_read("data/u1-base.base")
test_data = user_read("data/u1-test.test")
# Make sure training and test data have the same features
test_data = test_data.reindex(train_data.columns, axis=1)
# Fill NaN in training with "average" rating (2)
train_data = train_data.fillna(2)
# K = 3 K-nearest neighbors
neighborhood = Neighborhood(3)
neighborhood.fit(train_data)
results = pd.DataFrame(index=train_data.index, columns=train_data.columns)
# import packages
import numpy as np
import pandas as pd
import os
from itertools import product
from collections.abc import Iterable
def allowed_output(value, reaction_vol_nl=20000, drop_size_nl=100, verbose=0):
"""Based on high ,low and stock concentrations and droplet size calculate how many combinations is possible
Parameters
----------
value: tuple
(low, high, stock concentration)
Returns
-------
calculated_concs:
a list of possible concentrations
calculated_vols:
a list of possible volumes
"""
if value['Conc_Values']:
if isinstance(value['Conc_Stock'], Iterable):
drop_nums = [i * reaction_vol_nl / (drop_size_nl * value['Conc_Stock'][find_stock(value['Conc_Values'], value['Conc_Stock'], i)[0]]) for i in value['Conc_Values']]
calculated_concs = value['Conc_Values']
else:
drop_nums = [i * reaction_vol_nl / (drop_size_nl * value['Conc_Stock']) for i in value['Conc_Values']]
calculated_concs = value['Conc_Values']
else:
drop_nums = list(range(int((value['Conc_Min'] * reaction_vol_nl) / (drop_size_nl * value['Conc_Stock'])),
int((value['Conc_Max'] * reaction_vol_nl) / (drop_size_nl * value['Conc_Stock'])) + 1))
calculated_concs = [drop_num * value['Conc_Stock'] * drop_size_nl / reaction_vol_nl for drop_num in drop_nums]
if verbose:
print('drops :', drop_nums)
print('volumes :', [i * drop_size_nl for i in drop_nums])
print('possible_concentrations :', calculated_concs)
else:
return calculated_concs, [i * drop_size_nl for i in drop_nums]
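# Illustrative call (hypothetical metabolite entry; with verbose=1 the drop counts,
# volumes and concentrations are printed instead of returned):
#   >>> mg = {'Conc_Min': 1, 'Conc_Max': 4, 'Conc_Values': None, 'Conc_Stock': 40}
#   >>> concs, vols = allowed_output(mg, reaction_vol_nl=20000, drop_size_nl=100)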
def percentage_possible(data, threshold=40):
"""Based on threshold volume, it calculates how many combinations of all metabolite is possible to make
Parameters
----------
data: dict
{'metabolite name': [possible volumes], ...}
Returns
-------
percentage possible: float
total: int
total number of combinations (includes forbidden one)
"""
lists = list(data.values())
m = [len(i) for i in data.values()]
total = np.prod(np.array([len(i) for i in data.values()]))
possible = 0
for items in product(*lists):
if sum(items) <= threshold:
possible += 1
return (possible/total*100), total
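# Minimal sketch (made-up volumes): with a 40 nl budget, only combinations whose summed
# volumes stay at or below the threshold count as possible.
#   >>> percentage_possible({'A': [10, 30], 'B': [10, 20]}, threshold=40)
#   (75.0, 4)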
def find_stock(conc_values, conc_stocks, this_value):
"""this function find each concentration value belongs to wich stock concentration for metabolites with multiple stocks
Parameters
----------
conc_values: list
a list of all possible concentration
conc_stocks: list
a list of all stocks concentration
this_value: float, int
concentration value whose stock we want to find
Returns
-------
i:
index of found stock
out:
value of found stock
"""
num = len(conc_stocks)
avg = len(conc_values) / float(num)
out = []
last = 0.0
while last < len(conc_values):
out.append(conc_values[int(last):int(last + avg)])
last += avg
for i, value in enumerate(out):
if this_value in value:
return i, out
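# Illustrative sketch (hypothetical split): the concentration values are divided evenly
# across the listed stocks, so the lower half maps to the first stock and so on.
#   >>> find_stock([1, 2, 3, 4], [10, 20], 3)
#   (1, [[1, 2], [3, 4]])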
# random combination generator function_v3.0
def random_combination_generator(concentrations_limits, number_of_combination=100, reaction_vol_nl=10000,
max_nl=None, drop_size_nl=100, check_repeat=True, rounded=2, verbose=0, make_csv=False, return_df=False):
"""this function make random combination that is safe (e.g. dont make too much or low concentrated, not excecutable based on drop size, not repetitive)
Parameters
----------
concentrations_limits: dict
{'name of metabolite': {'Conc_Min': #, 'Conc_Max': #, 'Conc_Values': #, 'Conc_Stock': #, 'Alternatives': #}, ...}
Returns
-------
data: pandas.DataFrame
a dataframe consisting of number_of_combination random combinations
"""
# generating random combinations
combinations = []
data_point = 0
while data_point < number_of_combination:
input_data = []
input_vol = []
# verbosity
if (data_point % 10000 == 0) and verbose:
print(data_point)
# generation of random input
for key, value in concentrations_limits.items():
# Manual Concentration Value Generation
if value['Conc_Values']:
# With Alternatives
if value['Alternatives']:
num_alternative = len(value['Alternatives'])
choice_alternative = np.random.randint(0, num_alternative)
choice_list = [0 for i in range(num_alternative)]
choice_list[choice_alternative] = 1
choice_conc = np.random.choice(value['Conc_Values'])
input_data.append(choice_conc)
input_data += choice_list
if isinstance(value['Conc_Stock'], Iterable):
choice_stock, _ = find_stock(value['Conc_Values'], value['Conc_Stock'], choice_conc)
input_vol.append(choice_conc/value['Conc_Stock'][choice_stock]*reaction_vol_nl)
else:
input_vol.append(choice_conc/value['Conc_Stock']*reaction_vol_nl)
# Without Alternatives
else:
choice_conc = np.random.choice(value['Conc_Values'])
input_data.append(choice_conc)
if isinstance(value['Conc_Stock'], Iterable):
choice_stock, _ = find_stock(value['Conc_Values'], value['Conc_Stock'], choice_conc)
input_vol.append(choice_conc/value['Conc_Stock'][choice_stock]*reaction_vol_nl)
else:
input_vol.append(choice_conc/value['Conc_Stock']*reaction_vol_nl)
# Auto Concentration Value Generation
else:
# With Alternatives
if value['Alternatives']:
num_alternative = len(value['Alternatives'])
choice_alternative = np.random.randint(0, num_alternative)
choice_list = [0 for i in range(num_alternative)]
choice_list[choice_alternative] = 1
drop_num = np.random.randint(round(value['Conc_Min'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']),
round(value['Conc_Max'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']) + 1)
recalculated_conc = drop_num * value['Conc_Stock'] * drop_size_nl / reaction_vol_nl
input_data.append(recalculated_conc)
input_data += choice_list
input_vol.append(recalculated_conc/value['Conc_Stock']*reaction_vol_nl)
# Without Alternatives
else:
drop_num = np.random.randint(round(value['Conc_Min'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']),
round(value['Conc_Max'] * (reaction_vol_nl / drop_size_nl) / value['Conc_Stock']) + 1)
recalculated_conc = drop_num * value['Conc_Stock'] * drop_size_nl / reaction_vol_nl
input_data.append(recalculated_conc)
input_vol.append(recalculated_conc/value['Conc_Stock']*reaction_vol_nl)
# Checks
        if check_repeat and max_nl:
if input_data not in combinations and sum(input_vol)<= max_nl:
combinations.append(input_data)
data_point += 1
        elif check_repeat and not max_nl:
if input_data not in combinations:
combinations.append(input_data)
data_point += 1
        elif not check_repeat and max_nl:
if sum(input_vol)<= max_nl:
combinations.append(input_data)
data_point += 1
else:
combinations.append(input_data)
data_point += 1
# make column name:
columns_name = []
for key, value in concentrations_limits.items():
if not value['Alternatives']:
columns_name.append(key)
else:
columns_name.append(key)
alternative_name = ['{}_{}'.format(key, i) for i in value['Alternatives']]
columns_name += alternative_name
# making csv file
if make_csv:
data = pd.DataFrame(np.array(combinations), columns=columns_name)
data.to_csv('Random_Combination_1.csv', index=False)
# making dataframe
if return_df:
data = pd.DataFrame(np.array(combinations), columns=columns_name)
return data
return np.array(combinations)
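# Illustrative usage sketch (not part of the original pipeline): a minimal
# concentrations_limits dict with one fixed-value metabolite (two alternative
# stocks) and one auto-ranged metabolite. Metabolite names and numbers are made
# up; the real experiment dictionary is defined elsewhere.
def _demo_random_combinations():
    demo_limits = {
        'MetaboliteA': {'Conc_Min': None, 'Conc_Max': None,
                        'Conc_Values': [1, 2, 3, 4], 'Conc_Stock': [20, 40],
                        'Alternatives': None},
        'MetaboliteB': {'Conc_Min': 1, 'Conc_Max': 10,
                        'Conc_Values': None, 'Conc_Stock': 100,
                        'Alternatives': None},
    }
    return random_combination_generator(demo_limits, number_of_combination=10,
                                        reaction_vol_nl=10000, drop_size_nl=100,
                                        return_df=True)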
# transform concentration DataFrame to volume (nanolitre) DataFrame
def concentration_to_volume(concentrations, concentrations_limits, reaction_mixture_vol_nl=10000,
fixed_parts={'Lysate': 0.33, 'Saline': 0.1}, round_deg=1, check_water=True):
"""Transform concentrations dataframe to volumes dataframe
    option: add fixed volumes to all combinations, like Lysate
    caution: concentration units and metabolite names in concentrations and concentrations_limits must be the same.
Parameters
----------
concentrations: pandas.DataFrame
random_combination_generator output
Returns
-------
data: pandas.DataFrame
a dataframe same as input in shape but volumes data
"""
# make a copy of original dataframe to avoid further change than can affect that
data = concentrations.copy(deep=True)
data_all = data.copy(deep=True)
data = data[[i for i in data.columns if '_' not in i]]
data *= reaction_mixture_vol_nl
for metabolite_name, value in concentrations_limits.items():
if isinstance(value['Conc_Stock'], Iterable):
data[metabolite_name] = [round(data[metabolite_name][i] / value['Conc_Stock'][find_stock(value['Conc_Values'], value['Conc_Stock'], data_all[metabolite_name][i])[0]], round_deg) for i in range(len(data[metabolite_name]))]
else:
data[metabolite_name] = [round(data[metabolite_name][i] / value['Conc_Stock'], round_deg) for i in range(len(data[metabolite_name]))]
# add fix parts
if fixed_parts:
for key, value in fixed_parts.items():
data[key] = reaction_mixture_vol_nl * value
# add water to reach the reaction_mixture_vol_nl
data['water'] = reaction_mixture_vol_nl - data.sum(axis=1)
# for low stock concentration that is not possible to make, raise an error
# stock conc should be set in a way that doesn't raise this error to avoid further debugging
if check_water and not all(data['water'] >= 0): raise Exception("Oops, too concentrated combination!")
# add alternative
# make columns name list:
columns_name = []
Type_dic = {}
Stock_dic = {}
for key, value in concentrations_limits.items():
if value['Alternatives']:
columns_name.append(key)
columns_name.append('{}_Type'.format(key))
Type_dic[key] = []
else:
columns_name.append(key)
if isinstance(value['Conc_Stock'], Iterable):
columns_name.append('{}_Stock_Type'.format(key))
Stock_dic[key] = []
# Alternatives
for key in Type_dic.keys():
data_type = data_all[[i for i in data_all.columns if '{}_'.format(key) in i]]
for i in data_type.values:
Type_dic[key].append(concentrations_limits[key]['Alternatives'][np.where(i == 1.0)[0][0]])
Type_list = list(Type_dic.keys())
for key in Type_list:
Type_dic['{}_Type'.format(key)] = Type_dic.pop(key)
# Stock
for key in Stock_dic.keys():
Stock_dic[key] = [concentrations_limits[key]['Conc_Stock'][find_stock(concentrations_limits[key]['Conc_Values'], concentrations_limits[key]['Conc_Stock'], i)[0]] for i in data_all[key]]
Stock_list = list(Stock_dic.keys())
for key in Stock_list:
Stock_dic['{}_Stock_Type'.format(key)] = Stock_dic.pop(key)
data_final = pd.concat([data, pd.DataFrame(Type_dic), pd.DataFrame(Stock_dic)], axis=1)
return data_final[columns_name + list(fixed_parts.keys()) + ['water']]
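# Illustrative usage sketch (not part of the original pipeline): feeds randomly
# generated concentrations straight into concentration_to_volume. The limits dict
# is the same made-up demo one; the fixed_parts fractions are the function defaults.
def _demo_concentration_to_volume():
    demo_limits = {
        'MetaboliteA': {'Conc_Min': None, 'Conc_Max': None,
                        'Conc_Values': [1, 2, 3, 4], 'Conc_Stock': [20, 40],
                        'Alternatives': None},
        'MetaboliteB': {'Conc_Min': 1, 'Conc_Max': 10,
                        'Conc_Values': None, 'Conc_Stock': 100,
                        'Alternatives': None},
    }
    concs = random_combination_generator(demo_limits, number_of_combination=5,
                                         reaction_vol_nl=10000, drop_size_nl=100,
                                         return_df=True)
    return concentration_to_volume(concs, demo_limits, reaction_mixture_vol_nl=10000,
                                   fixed_parts={'Lysate': 0.33, 'Saline': 0.1})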
def day_finder(file, file_format='csv'):
"""Find the first notcompleted day
Parameters
----------
file:
for now, it can only be 'Results'
Returns
-------
i: int
the first not completed day
"""
i = 1
while True:
if not os.path.isfile('{}_{}.{}'.format(file, i, file_format)):
return i
i += 1
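# Illustrative usage sketch: with Results_1.csv and Results_2.csv already on disk,
# day_finder('Results') returns 3, i.e. the first day whose results file is missing.
def _demo_day_finder():
    return day_finder('Results', file_format='csv')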
def result_preprocess(day, desired_cols, range=20):
"""Preprocess Results.csv file to get desired columns and rows
caution: the target column name MUST be 'yield'
Parameters
----------
day:
Results_day.csv
desired_cols:
name of columns that you want from the results file
Returns
-------
data_m:
data in range
label_m:
label in range
data_specials:
other data
label_specials:
other labels
"""
    results = pd.read_csv('Results_{}.csv'.format(day))
# m number pipeline
data_m = results[desired_cols].iloc[:range, :]
label_m = results[['yield']].iloc[:range, :]
# reference, control and specials
data_specials = results[desired_cols].iloc[range:, :]
label_specials = results[['yield']].iloc[range:, :]
return data_m, label_m, data_specials, label_specials
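# Illustrative usage sketch: the desired column names below are placeholders for
# the metabolite columns actually present in Results_<day>.csv; result_preprocess
# itself only requires the target column to be named 'yield'.
def _demo_result_preprocess(day=1):
    desired_cols = ['MetaboliteA', 'MetaboliteB']  # hypothetical feature columns
    data_m, label_m, data_specials, label_specials = result_preprocess(day, desired_cols, range=20)
    return data_m, label_m, data_specials, label_specials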
def check_repetitive(combination, df_main):
"""Check to avoid repetitive combinations
Parameters
----------
combination:
combinations that want to be checked
df_main: pandas.DataFrame
source dataframe
Returns
-------
boolean:
        True: the combination is new (does not exist in df_main)
        False: the combination already exists in df_main
"""
comparison_df = df_main.merge(combination, indicator=True, how='outer')
if 'both' in comparison_df._merge.unique():
return False
else:
return True
def bayesian_optimization(regressors_list,
data, label,
concentrations_limits,
final_order,
df_main,
reaction_vol_nl=20000, max_nl=13200, drop_size_nl=100,
exploitation=1, exploration=1, test_size=100, pool_size=100000, verbose=0, day=1,
days_range=[20, 20, 20, 20, 20, 20, 20, 20, 20, 20],
batch_ucb=False):
"""Main bayesian optimization function
Parameters
----------
regressors_list:
a list consists of more than one regressor that has .fit and .predict feature
data: pandas.DataFrame
all previous day data
label: pandas.DataFrame
all previous day label
exploitation: 1
coefficient of focus on higher yield query
exploration: 1
coefficient of focus on a more informative query
test_size: 100
output combinations number
pool_size: 100000
how many random combinations to ask from the regressor list each round
caution: this parameter highly affects executions time
Returns
-------
chosen_combinations: pandas.DataFrame
combinations that expected to improve yield
if batch_ucb == True
Returns
-------
best sample based on ucb: pandas.Series, best sample's expected value: float
"""
# first fit training data on our models
for regressor in regressors_list:
regressor.fit(data.values, label.values)
# make random test data
df_1 = random_combination_generator(concentrations_limits, number_of_combination=pool_size,
reaction_vol_nl=reaction_vol_nl,
max_nl=max_nl, drop_size_nl=drop_size_nl, make_csv=False, return_df=True)
desired_cols = list(df_1.columns)
df_temp = df_1.copy(deep=True)
# Upper Confidence Bound
for index, regressor in enumerate(regressors_list):
df_1['pred_yield_{}'.format(index)] = regressor.predict(df_temp.values)
df_1['regressors_std'] = df_1[[str(i) for i in df_1.columns if 'pred_yield' in str(i)]].std(axis=1)
df_1['mean_vote'] = df_1[[str(i) for i in df_1.columns if 'pred_yield' in str(i)]].mean(axis=1)
df_1['UCB'] = exploitation * df_1['mean_vote'] + exploration * df_1['regressors_std']
df_1 = df_1.sort_values(['UCB'], ascending=False)
if batch_ucb:
return df_1[final_order].iloc[0:1, :], df_1['mean_vote'].values[0]
    # check that we don't return repeated combinations (unlikely, but possible)
chosen_combinations = pd.DataFrame(columns=desired_cols)
num = 0
for i in df_1[desired_cols].values:
temp_combination = pd.DataFrame([i], columns=desired_cols)
if check_repetitive(temp_combination, df_main):
num += 1
chosen_combinations = pd.concat([chosen_combinations, temp_combination]).reset_index(drop=True)
if num == test_size:
break
return chosen_combinations[final_order]
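# Illustrative usage sketch (not part of the original pipeline): any ensemble of
# scikit-learn style regressors with .fit/.predict can be passed in; the data,
# label, concentrations_limits, final_order and df_main objects are assumed to
# come from the earlier steps of the real run.
def _demo_bayesian_optimization(data, label, concentrations_limits, final_order, df_main):
    from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
    regressors = [RandomForestRegressor(n_estimators=100), GradientBoostingRegressor()]
    return bayesian_optimization(regressors, data, label, concentrations_limits,
                                 final_order=final_order, df_main=df_main,
                                 exploitation=1, exploration=1,
                                 test_size=20, pool_size=10000)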
# Batch UCB on top of bayesian optimization
def batch_ucb(regressors_list,
data, label,
concentrations_limits,
final_order,
df_main,
reaction_vol_nl=20000, max_nl=13200, drop_size_nl=100,
exploitation=1, exploration=1, test_size=100, pool_size=100000, verbose=0, day=1,
days_range=[20, 20, 20, 20, 20, 20, 20, 20, 20, 20]):
"""Batch UCB on top of bayesian optimization function
Parameters
----------
regressors_list:
a list consists of more than one regressor that has .fit and .predict feature
data: pandas.DataFrame
all previous day data
label: pandas.DataFrame
all previous day label
exploitation: 1
coefficient of focus on higher yield query
exploration: 1
coefficient of focus on a more informative query
test_size: 100
output combinations number
pool_size: 100000
how many random combinations to ask from the regressor list each round
caution: this parameter highly affects executions time
Returns
-------
chosen_combinations: pandas.DataFrame
combinations that expected to improve yield
"""
final_samples = []
for i in range(test_size):
sample, expected_value = bayesian_optimization(regressors_list, data, label, concentrations_limits,
final_order=final_order,
df_main = df_main,
reaction_vol_nl=reaction_vol_nl, max_nl=max_nl,
drop_size_nl=drop_size_nl,
exploitation=exploitation, exploration=exploration, test_size=test_size, pool_size=pool_size, verbose=0, day=day, days_range = days_range,
batch_ucb=True)
final_samples.append(sample)
data = pd.concat([data, sample], axis=0).reset_index(drop=True)
label = pd.concat([label, pd.DataFrame({'yield': [expected_value]})], axis=0).reset_index(drop=True)
    return pd.concat(final_samples)
from functools import reduce
from itertools import groupby
import pandas as pd
from matplotlib import pyplot as plt
from src.orders.order import OrderStatus
class BackTester:
"""
Purpose: Backtesting and output performance report
"""
def __init__(self, strategy: str = '', initial_cash: float = 10000, commission: float = .0, lot_size: float = 100000):
self.strategy = strategy
self.initial_cash = initial_cash
self.commission = commission
self.lot_size = lot_size
def run(self, price_feed: pd.DataFrame, orders: list, print_stats=True, output_csv=False, suffix='') -> pd.DataFrame:
"""
        back-test the given orders against the price feed
:param price_feed: Price feed DataFrame
:param orders: list of Orders
:param print_stats: bool, printout stats
:param output_csv: bool, output csv
:param suffix: used for chart plotting in order to differentiate strategy with different parameters
:return: pd.DataFrame
"""
price_dict = price_feed.to_dict('index')
performance = []
for time, ohlc in price_dict.items():
for o in orders:
should_take_action = o.is_open and time >= o.last_update
if should_take_action:
# Fill pending orders
if o.is_pending:
if o.is_long:
if ohlc['high'] > o.entry: # buy order filled
o.fill(time)
elif o.is_short:
if ohlc['low'] < o.entry: # sell order filled
o.fill(time)
# Close filled orders
if o.is_filled:
if o.is_long:
if ohlc['low'] <= o.sl:
o.close_with_loss(time)
elif ohlc['high'] > o.tp:
o.close_with_win(time)
elif o.is_short:
if ohlc['high'] >= o.sl:
o.close_with_loss(time)
elif ohlc['low'] < o.tp:
o.close_with_win(time)
position = sum(o.pnl for o in orders) * self.lot_size + self.initial_cash # 1 standard lot = 100,000
performance.append({
'time': time,
f'pnl{suffix}': position
})
if print_stats:
self.print_stats(orders)
if output_csv:
self.output_csv(orders)
        return pd.DataFrame(performance)
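# Illustrative usage sketch (not part of the module): price_feed is expected to be
# an OHLC DataFrame indexed by time and `orders` a list of Order objects from
# src.orders.order; both are assumed to be built elsewhere in the project.
def _demo_backtest(price_feed: pd.DataFrame, orders: list) -> pd.DataFrame:
    backtester = BackTester(strategy='demo', initial_cash=10000, lot_size=100000)
    performance = backtester.run(price_feed, orders, print_stats=False, output_csv=False)
    performance.set_index('time')['pnl'].plot(title='Equity curve')
    plt.show()
    return performance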
from datarobot.models import feature
from . import utils
from . import _config
import time
from tqdm import tqdm # TODO check if we are in a notebook
from sklearn.feature_selection import f_regression, f_classif
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.utils import check_array
from typing import List
from typing import Callable
from typing import Tuple
from typing import Union
import pandas as pd
import numpy as np
from statistics import mean
from math import ceil
import datarobot as dr
class RAPABase():
"""
The base of regression and classification RAPA analysis
"""
POSSIBLE_TARGET_TYPES = [x for x in dir(dr.enums.TARGET_TYPE) if not x.startswith('__')] # List of DR TARGET_TYPES
"""_classification = None # Set by child classes
target_type = None # Set at initialization
# target_name = None # Set with 'create_submittable_dataframe()'
project = None # Set at initialization or with 'perform_parsimony()'"""
def __init__(self, project: Union[dr.Project, str] = None):
if self.__class__.__name__ == "RAPABase":
raise RuntimeError("Do not instantiate the RAPABase class directly; use RAPAClassif or RAPARegress")
self._classification = None
self.target_type = None
self.project = None
def create_submittable_dataframe(self,
input_data_df: pd.DataFrame,
target_name: str,
max_features: int = 19990,
n_splits: int = 6,
filter_function: Callable[[pd.DataFrame, np.ndarray], List[np.ndarray]] = None,
random_state: int = None) -> Tuple[pd.DataFrame, str]: #TODO: change return type, inplace option
"""Prepares the input data for submission as either a regression or classification problem on DataRobot.
Creates pre-determined k-fold cross-validation splits and filters the feature
set down to a size that DataRobot can receive as input, if necessary. TODO: private function submit_datarobot_project explanation
## Parameters
----------
input_data_df: pandas.DataFrame
pandas DataFrame containing the feature set and prediction target.
target_name: str
Name of the prediction target column in `input_data_df`.
max_features: int, optional (default: 19990)
The number of features to reduce the feature set in `input_data_df`
down to. DataRobot's maximum feature set size is 20,000.
n_splits: int, optional (default: 6)
The number of cross-validation splits to create. One of the splits
will be retained as a holdout split, so by default this function
sets up the dataset for 5-fold cross-validation with a holdout.
filter_function: callable, optional (default: None)
The function used to calculate the importance of each feature in
the initial filtering step that reduces the feature set down to
`max_features`.
This filter function must take a feature matrix as the first input
and the target array as the second input, then return two separate
arrays containing the feature importance of each feature and the
P-value for that correlation, in that order.
When None, the filter function is determined by child class.
If an instance of `RAPAClassif()`, sklearn.feature_selection.f_classif is used.
If `RAPARegress()`, sklearn.feature_selection.f_regression is used.
See scikit-learn's f_classif function for an example:
https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.f_regression.html
random_state: int, optional (default: None)
The random number generator seed for RAPA. Use this parameter to make sure
that RAPA will give you the same results each time you run it on the
same input data set with that seed.
Returns
----------
pandas.DataFrame
            DataFrame holding the original values from the input DataFrame, but with
            pre-determined k-fold cross-validation splits, and filtered down to at most
            'max_features' feature columns using 'filter_function'
"""
#TODO: make private function?
# Check dataframe has 'target_name' columns
if target_name not in input_data_df.columns:
raise KeyError(f'{target_name} is not a column in the input DataFrame')
# Check that the dataframe can be copied and remove target_name column TODO: inplace option
input_data_df = input_data_df.copy()
only_features_df = input_data_df.drop(columns=[target_name])
# Set target_type, kfold_type, and filter_function based on type of classification/regression problem
if self._classification:
# Check if binary or multi classification problem
if len(np.unique(input_data_df[target_name].values)) == 2:
self.target_type = dr.enums.TARGET_TYPE.BINARY
else:
self.target_type = dr.enums.TARGET_TYPE.MULTICLASS
kfold_type = StratifiedKFold
filter_function = f_classif
else:
# Check array for infinite values/NaNs
check_array(input_data_df)
kfold_type = KFold
self.target_type = dr.enums.TARGET_TYPE.REGRESSION
filter_function = f_regression
# Create 'partition' column and set all values to 'train'
input_data_df['partition'] = 'train'
train_feature_importances = []
# Make cross validation folds
fold_name_prefix = 'CV Fold'
for fold_num, (_, fold_indices) in enumerate(kfold_type(n_splits=n_splits, random_state=random_state, shuffle=True).split(only_features_df.values,
input_data_df[target_name].values)):
# Replace the values in the partition column with their true CV fold, removing all 'train' entries
input_data_df.iloc[fold_indices, input_data_df.columns.get_loc('partition')] = f'{fold_name_prefix} {fold_num}'
# Fold 0 is the holdout set, so don't calculate feature importances using that fold
if fold_num > 0:
feature_importances, _ = filter_function(only_features_df.iloc[fold_indices].values, input_data_df[target_name].iloc[fold_indices].values)
train_feature_importances.append(feature_importances)
# We calculate the overall feature importance scores by averaging the feature importance scores across all of the training folds
avg_train_feature_importances = np.mean(train_feature_importances, axis=0)
        # Change partition 'CV Fold 0' to 'Holdout'
input_data_df.loc[input_data_df['partition'] == f'{fold_name_prefix} 0', 'partition'] = 'Holdout'
        # TODO: break up these nasty one-liners
most_correlated_features = only_features_df.columns.values[np.argsort(avg_train_feature_importances)[::-1][:max_features]].tolist()
        # keep target_name, partition, and the most correlated feature columns in the upload DataFrame
datarobot_upload_df = input_data_df[[target_name, 'partition'] + most_correlated_features]
return datarobot_upload_df
def submit_datarobot_project(self,
input_data_df: pd.DataFrame,
target_name: str,
project_name: str,
target_type: str = None,
worker_count: int = -1,
mode: str = dr.AUTOPILOT_MODE.FULL_AUTO,
random_state: int = None) -> dr.Project: #TODO check input df for partition, target_name (data-robotified df), logger.warning
"""Submits the input data to DataRobot as a new modeling project.
It is suggested to prepare the `input_data_df` using the
'create_submittable_dataframe' function first with an instance of
either RAPAClassif or RAPARegress.
Parameters
----------
input_data_df: pandas.DataFrame
pandas DataFrame containing the feature set and prediction target.
target_name: str
Name of the prediction target column in `input_data_df`.
project_name: str
Name of the project in DataRobot.
target_type: str (enum)
Indicator to DataRobot of whether the new modeling project should be
a binary classification, multiclass classification, or regression project.
Options:
datarobot.TARGET_TYPE.BINARY
datarobot.TARGET_TYPE.REGRESSION
datarobot.TARGET_TYPE.MULTICLASS
worker_count: int, optional (default: -1)
The number of worker engines to assign to the DataRobot project.
By default, -1 tells DataRobot to use all available worker engines.
mode: str (enum), optional (default: datarobot.AUTOPILOT_MODE.FULL_AUTO)
The modeling mode to start the DataRobot project in.
Options:
datarobot.AUTOPILOT_MODE.FULL_AUTO
datarobot.AUTOPILOT_MODE.QUICK
datarobot.AUTOPILOT_MODE.MANUAL
datarobot.AUTOPILOT_MODE.COMPREHENSIVE: Runs all blueprints in
the repository (warning: this may be extremely slow).
random_state: int, optional (default: None)
The random number generator seed for DataRobot. Use this parameter to make sure
that DataRobot will give you the same results each time you run it on the
same input data set with that seed.
"""
# Check for a target_type
if target_type == None or target_type not in self.POSSIBLE_TARGET_TYPES:
target_type = self.target_type
if target_type == None:
raise Exception(f'No target type.')
project = dr.Project.create(sourcedata=input_data_df, project_name=project_name)
project.set_target(target=target_name, target_type=target_type,
worker_count=worker_count, mode=mode,
advanced_options=dr.AdvancedOptions(seed=random_state, accuracy_optimized_mb=False,
prepare_model_for_deployment=False, blend_best_models=False),
partitioning_method=dr.UserCV(user_partition_col='partition', cv_holdout_level='Holdout'))
return project
def perform_parsimony(self, feature_range: List[Union[int, float]],
project: Union[dr.Project, str] = None,
starting_featurelist: str = 'Informative Features',
featurelist_prefix: str = 'RAPA Reduced to',
mode: str = dr.AUTOPILOT_MODE.FULL_AUTO,
lives: int = None, # TODO
cv_variance_limit: float = None, # TODO
feature_importance_statistic: str = 'median',
progress_bar: bool = True,
to_graph: List[str] = None, # TODO
metric: str = None):
"""Performs parsimony analysis by repetatively extracting feature importance from
DataRobot models and creating new models with reduced features (smaller feature lists). # TODO take a look at featurelist_prefix for running multiple RAPA
Parameters:
----------
feature_range: list[int] | list[float]
Either a list containing integers representing desired featurelist lengths,
or a list containing floats representing desired featurelist percentages (of the original featurelist size)
project: datarobot.Project | str, optional (default = None)
Either a datarobot project, or a string of it's id or name. If None,
uses the project that was provided to create the rapa class
starting_featurelist: str, optional (default = 'Informative Features')
            The name or id of the featurelist that rapa will start parsimony analysis with
featurelist_prefix: str, optional (default = 'RAPA Reduced to')
The desired prefix for the featurelists that rapa creates in datarobot. Each featurelist
will start with the prefix, include a space, and then end with the number of features in that featurelist
mode: str (enum), optional (default: datarobot.AUTOPILOT_MODE.FULL_AUTO)
The modeling mode to start the DataRobot project in.
Options:
datarobot.AUTOPILOT_MODE.FULL_AUTO
datarobot.AUTOPILOT_MODE.QUICK
datarobot.AUTOPILOT_MODE.MANUAL
datarobot.AUTOPILOT_MODE.COMPREHENSIVE: Runs all blueprints in
the repository (warning: this may be extremely slow).
lives: int, optional (default = None)
The number of times allowed for reducing the featurelist and obtaining a worse model. By default,
            'lives' are off, and the entire 'feature_range' will be run, but if supplied a number >= 0, then
that is the number of 'lives' there are.
Ex: lives = 0, feature_range = [100, 90, 80, 50]
RAPA finds that after making all the models for the length 80 featurelist, the 'best' model was created with the length
90 featurelist, so it stops and doesn't make a featurelist of length 50.
Similar to datarobot's Feature Importance Rank Ensembling for advanced feature selection (FIRE) package's 'lifes'
https://www.datarobot.com/blog/using-feature-importance-rank-ensembling-fire-for-advanced-feature-selection/
        cv_variance_limit: float, optional (default = None)
            The limit on the cross-validation mean error, to help avoid overfitting. By default the limit is off,
            and every entry in 'feature_range' will be run. The limit is enforced only if supplied a number >= 0.0
            Ex: cv_variance_limit = 0.1, feature_range = [100, 90, 80, 50]
                RAPA finds that the average AUC for each CV fold is [.8, .6, .9, .5] respectively,
                the mean of these is 0.7 and the average error is +/- 0.15. If 0.15 >= cv_variance_limit,
                the training stops.
feature_importance_statistic: str, optional (default = 'median')
How RAPA will decide each feature's importance over every model in a feature list
OPTIONS: ['median', 'mean', 'cumulative']
progress_bar: bool, optional (default = True)
            If True, a simple progress bar displaying complete and incomplete featurelists.
            If False, provides updates in stdout, e.g. current worker count, current featurelist, etc.
to_graph: List[str], optional (default = None)
A list of keys choosing which graphs to produce
metric: str, optional (default = None)
The metric used for scoring models. Used when finding the 'best' model, and when
plotting model performance
When None, the metric is determined by what class inherits from base. For instance,
a `RAPAClassif` instance's default is 'AUC', and `RAPARegress` is 'R Squared'.
"""
# TODO: check the entire list for type? and make the logic more logical
# TODO: exceptions raised are almost always generic, look at raising specific exceptions?
# TODO: Support graphing over time
# TODO: return a dictionary of values? {"time_taken": 2123, "cv_mean_error": list[floats], ""}
# check project
if project == None:
project = self.project
if project == None:
raise Exception('No provided datarobot.Project()')
# check scoring metric TODO: support more scoring metrics
if metric == None:
if self._classification: # classification
metric = 'AUC'
else: # regression
metric = 'R Squared'
# check if project is a string, and if it is, find it
if type(project) == str:
project = utils.find_project(project)
if project == None:
raise Exception(f'Could not find the project.')
# get starting featurelist
try:
starting_featurelist = utils.get_featurelist(starting_featurelist, project)
except: # TODO: flesh out exceptions
print("Something went wrong getting the starting featurelist...")
# check feature_range size
if len(feature_range) == 0:
raise Exception(f'The provided feature_range is empty.')
# feature_range logic for sizes (ints) / ratios (floats)
if type(feature_range[0]) == int: #TODO np.all(float) np.all(int)
feature_range_check = [x for x in feature_range if x < len(starting_featurelist.features)-2 and x > 0] # -2 because of target feature and partitions TODO: CHECK FOR FEATURE/PARTITIONS INSTEAD OF JUST SUBTRACTING 2
if len(feature_range_check) != len(feature_range): # check to see if values are < 0 or > the length of the original featurelist
raise Exception('The provided feature_range integer values have to be: 0 < feature_range < original featurelist length')
elif type(feature_range[0]) == float:
feature_range_check = [x for x in feature_range if x > 0 and x < 1]
if len(feature_range_check) != len(feature_range):
raise Exception(f'The provided feature_range ratio values have to be: 0 < feature_range < 1')
# convert ratios to featurelist sizes
original_featurelist_size = len(starting_featurelist.features)-2 # -2 because of target feature and partitions
feature_range = [ceil(original_featurelist_size * feature_pct) for feature_pct in feature_range] # multiply by feature_pct and take ceil
feature_range = pd.Series(feature_range).drop_duplicates() # drop duplicates
feature_range = list(feature_range[feature_range < original_featurelist_size]) # take all values that less than the original featurelist size
feature_range.sort(reverse=True) # sort descending
# get the models from starting featurelist
datarobot_project_models = project.get_models()
for model in datarobot_project_models: # for each model
if model.featurelist_id == starting_featurelist.id: # if the model uses the starting featurelist, request the feature impact
if model.metrics[metric]['crossValidation'] != None:
try:
model.request_feature_impact()
except dr.errors.JobAlreadyRequested:
continue
# waiting for DataRobot projects TODO tqdm/multithreading/print tqdm function for printing things w/o messing things up
# TODO make helper function..?
# TODO check to see if datarobot made a function
# TODO request_featureimpact returns a job indicator?
while len(project.get_all_jobs()) > 0:
            if not progress_bar: # stdout updates only when the progress bar is off
                print(f'There are {len(project.get_all_jobs())} jobs remaining...'.ljust(33), end='\r') # TODO: Make this better
time.sleep(5)
# get feature impact/importances of original featurelist
all_feature_importances = []
for model in datarobot_project_models:
if model.featurelist_id == starting_featurelist.id: # if the model uses the starting featurelist, request the feature impact
if model.metrics[metric]['crossValidation'] != None:
all_feature_importances.extend(model.get_feature_impact())
# sort by features by feature importance statistic TODO: better way to do this, dictionary w/ [median:pd.DataFrame.median()] ?
stat_feature_importances = pd.DataFrame(all_feature_importances).groupby('featureName')['impactNormalized']
if feature_importance_statistic.lower() == 'median':
stat_feature_importances = stat_feature_importances.median().sort_values(ascending=False)
elif feature_importance_statistic.lower() == 'mean':
stat_feature_importances = stat_feature_importances.mean().sort_values(ascending=False)
elif feature_importance_statistic.lower() == 'cumulative':
stat_feature_importances = stat_feature_importances.sum().sort_values(ascending=False)
else: # feature_importance_statistic isn't one of the provided statistics
raise ValueError(f'The provided feature_importance_statistic:{feature_importance_statistic} is not one of the provided:{_config.feature_importance_statistics}')
# waiting for DataRobot projects
while len(project.get_all_jobs()) > 0:
if not progress_bar: # PROGRESS BAR
print(f'There are {str(project.get_all_jobs())} jobs remaining...'.ljust(33), end='\r') # TODO: Make this better/work. currently not printing?
time.sleep(5)
# get the best performing model of this iteration
last_best_model = utils.get_best_model(project, metric=metric)
# perform parsimony
for featurelist_length in tqdm(feature_range):
try:
# get shortened featurelist
desired_reduced_featurelist_size = featurelist_length
reduced_features = stat_feature_importances.head(desired_reduced_featurelist_size).index.values.tolist()
# create new featurelist in datarobot
new_featurelist_name = '{} {}'.format(featurelist_prefix, len(reduced_features)) # TODO have some suffix added, move try except
reduced_featurelist = project.create_featurelist(name=new_featurelist_name, features=reduced_features)
# submit new featurelist and create models
project.start_autopilot(featurelist_id=reduced_featurelist.id, mode=mode, blend_best_models=False, prepare_model_for_deployment=False)
project.wait_for_autopilot(verbosity=dr.VERBOSITY_LEVEL.SILENT)
datarobot_project_models = project.get_models()
for model in datarobot_project_models:
if model.featurelist_id == reduced_featurelist.id and model.metrics[metric]['crossValidation'] != None:
try:
model.request_feature_impact()
except dr.errors.JobAlreadyRequested:
pass
# API note: Is there a project-level wait function for all jobs, regardless of AutoPilot status?
while len(project.get_all_jobs()) > 0:
if not progress_bar: # PROGRESS BAR
print(f'There are {str(project.get_all_jobs())} jobs remaining...'.ljust(33), end='\r') # TODO: Make this better/work. currently not printing?
time.sleep(5)
while(len(all_feature_importances) == 0):
all_feature_importances = []
for model in datarobot_project_models:
if model.featurelist_id == reduced_featurelist.id and model.metrics[metric]['crossValidation'] != None:
all_feature_importances.extend(model.get_feature_impact())
time.sleep(5)
# sort by features by feature importance statistic TODO: better way to do this, dictionary w/ [median:pd.DataFrame.median()] ?
            stat_feature_importances = pd.DataFrame(all_feature_importances)
import mayavi
import neuromaps
import fcn_megfmri
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.metrics
import scipy.stats as stats
import matplotlib.pyplot as plt
from statsmodels.stats import multitest
from neuromaps.parcellate import Parcellater
# load data
megdata = np.load('../../data/groupFCmeg_aec_orth_schaefer400.npy.npz')
avgFCmeg = megdata['megfc']
bands = megdata['bands']
avgFCmri = np.load('../../data/groupFCmri_schaefer400.npy')
# load schaefer info
schaeferpath = '../../data/schaefer/'
lhlabels = (schaeferpath + 'HCP/fslr32k/gifti/' +
'Schaefer2018_400Parcels_7Networks_order_lh.label.gii')
rhlabels = (schaeferpath + 'HCP/fslr32k/gifti/' +
'Schaefer2018_400Parcels_7Networks_order_rh.label.gii')
labelinfo = np.loadtxt(schaeferpath + 'HCP/fslr32k/gifti/' +
'Schaefer2018_400Parcels_7Networks_order_info.txt',
dtype='str', delimiter='tab')
rsnlabels = []
for row in range(0, len(labelinfo), 2):
rsnlabels.append(labelinfo[row].split('_')[2])
# load coordinates and estimate distance
coor = np.loadtxt(schaeferpath + '/Schaefer_400_centres.txt', dtype=str)
coor = coor[:, 1:].astype(float)
distance = sklearn.metrics.pairwise_distances(coor)
# get custom colormaps
cmap_seq, cmap_seq_r, megcmap, megcmap2, categ_cmap = fcn_megfmri.make_colormaps()
plt.rcParams['svg.fonttype'] = 'none'
plt.rcParams['font.sans-serif'] = ['Myriad Pro']
plt.rcParams['font.size'] = 18.0
####################################
# linear regression
####################################
# regional model
rsq, full_pred_fc, corrVal = fcn_megfmri.regional_lreg(megdata=avgFCmeg,
fmridata=avgFCmri,
distance=distance,
correct_dist=False,
adjusted_rsq=True)
# global model
rsq_g, full_pred_fc_g, corrVal_g = fcn_megfmri.global_lreg(megdata=avgFCmeg,
fmridata=avgFCmri,
distance=distance,
correct_dist=False,
adjusted_rsq=True)
####################################
# visualize predicted fc
####################################
nnode = len(rsq)
masku = np.mask_indices(nnode, np.triu, 1)
# regional model
plt.figure()
plt.imshow(full_pred_fc, vmin=0, vmax=0.55,
cmap=cmap_seq_r)
plt.title('predicted fmri fc - upper tri (regional)')
plt.colorbar()
# scatter plot
xlab = 'empirical fmri fc'
ylab = 'predicted fmri fc'
title = 'regional model: pearson r = %1.3f' % (stats.pearsonr(avgFCmri[masku],
full_pred_fc[masku])[0])
plt.figure()
myplot = fcn_megfmri.scatterregplot(avgFCmri[masku],
full_pred_fc[masku],
title, xlab, ylab, 50)
myplot.figure.set_figwidth(7)
myplot.figure.set_figheight(7)
# global model
plt.figure()
plt.imshow(full_pred_fc_g, vmin=0, vmax=0.35,
cmap=cmap_seq_r)
plt.title('predicted fmri fc - upper tri (global)')
plt.colorbar()
# scatter plot
xlab = 'empirical fmri fc'
ylab = 'predicted fmri fc'
title = 'global model: pearson r = %1.3f' % (stats.pearsonr(avgFCmri[masku],
full_pred_fc_g[masku])[0])
plt.figure()
myplot = fcn_megfmri.scatterregplot(avgFCmri[masku],
full_pred_fc_g[masku],
title, xlab, ylab, 50)
myplot.figure.set_figwidth(7)
myplot.figure.set_figheight(7)
####################################
# visualize R2 distribution
####################################
# plot regional R2 on brain
toplot = np.array(rsq)
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap,
colorbartitle='adjusted rsq',
surf='inflated')
# plot histogram distribution
fig, ax = plt.subplots(1, 1)
plt.hist(rsq, density=False, rwidth=0.9,
color=[153/255, 153/255, 153/255], label='regional fit')
plt.vlines([rsq_g], ymin=0, ymax=115, linewidth=3,
color=[242/255, 111/255, 136/255], label='global fit')
plt.xlabel('R-squared')
plt.ylabel('count')
plt.title('meg-fmri mapping')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
####################################
# functional hierarchy
####################################
# use fmri data from same subjects to get fc gradient
grads, lambdas = fcn_megfmri.get_gradients(avgFCmri, ncomp=10)
ncomp = 0
fcgrad = grads[:, ncomp]
toplot = fcgrad
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap,
colorbartitle='fc gradient %s' % (ncomp+1),
surf='inflated')
# or use neuromaps to get fc gradient of all HCP subjects
grad1 = neuromaps.datasets.fetch_annotation(desc='fcgradient01')
parcellation = [lhlabels, rhlabels]
parc = Parcellater(parcellation, 'fslr')
parcellated_grad1 = parc.fit_transform(grad1, 'fslr')
# correlate and plot
x = fcgrad
y = rsq
corr = stats.spearmanr(x, y)
pvalspin = fcn_megfmri.get_spinp(x, y, corrval=corr[0], nspin=10000,
corrtype='spearman',
lhannot=lhlabels, rhannot=rhlabels)
title = 'spearman r = %1.3f - p (spin) = %1.4f' % (corr[0], pvalspin)
xlab = 'fc hierarchy'
ylab = 'R-squared'
plt.figure()
fcn_megfmri.scatterregplot(x, y, title, xlab, ylab, 60)
####################################
# intrinsic networks
####################################
# plot
network_labels = rsnlabels
rsnlabelsabb = np.unique(rsnlabels)
network_score = pd.DataFrame(data={'R-squared': rsq,
'rsn': network_labels})
medianVal = np.vstack([network_score.loc[network_score[
'rsn'].eq(netw),
'R-squared'].median() for netw in rsnlabelsabb])
idx = np.argsort(-medianVal.squeeze())
plot_order = [rsnlabelsabb[k] for k in idx]
sns.set(style='ticks', palette='pastel')
ax = sns.boxplot(x='rsn', y='R-squared',
data=network_score,
width=0.5, fliersize=3, showcaps=False,
order=plot_order, showfliers=False)
sns.despine(ax=ax, offset=5, trim=True)
plt.setp(ax.artists, edgecolor = 'k', facecolor='w')
plt.setp(ax.lines, color='k')
ax.figure.set_figwidth(7)
ax.figure.set_figheight(6)
plt.tight_layout()
####################################
# dominance analysis
####################################
# estimate band-specific contribution to R2 for each node
percentDominance_adj = fcn_megfmri.get_percent_dominance(megdata=avgFCmeg,
fmridata=avgFCmri,
distance=distance,
correct_dist=False,
adjusted_rsq=True)
# percent dominance boxplots
for n, band in enumerate(bands):
if n == 0:
bandContrib = pd.DataFrame(data={'percent': percentDominance_adj[:, n],
'band': [band] * 400})
else:
temp = pd.DataFrame(data={'percent': percentDominance_adj[:, n],
'band': [band] * 400})
bandContrib = pd.concat([bandContrib, temp], axis=0)
bandContrib['percent'] = bandContrib['percent'].astype(float)
medianVal = np.vstack([bandContrib.loc[bandContrib[
'band'].eq(band),
'percent'].median() for band in bands])
idx = np.argsort(-medianVal.squeeze())
plot_order = [bands[k] for k in idx]
sns.set(style='ticks', palette='pastel')
plt.figure()
ax = sns.boxplot(x='band', y='percent', data=bandContrib,
width=.45, fliersize=3, showcaps=False,
order=plot_order, showfliers=False)
sns.despine(ax=ax, offset=5, trim=True)
ax.axes.set_title('Dominance analysis')
plt.setp(ax.artists, edgecolor = 'k', facecolor='w')
plt.setp(ax.lines, color='k')
ax.figure.set_figwidth(6)
ax.figure.set_figheight(6)
plt.tight_layout()
####################################
# percent dominance in intrinsic networks
####################################
percentDominancrsn = pd.DataFrame(percentDominance_adj, columns=bands)
percentDominancrsn['rsn'] = rsnlabels
bandrsncontrib = []
for band in bands:
banddata = percentDominancrsn[band]
tempdf = pd.DataFrame(banddata, columns=[band])
tempdf['rsn'] = percentDominancrsn['rsn']
meanrsnContrib = tempdf.groupby(['rsn']).mean()
bandrsncontrib.append(meanrsnContrib)
orig_order = list(meanrsnContrib.index)
bandrsncontrib = np.array(bandrsncontrib).squeeze()
new_order = ['Default', 'Cont', 'Limbic', 'SalVentAttn', 'DorsAttn',
'SomMot', 'Vis']
new_order_idx = [orig_order.index(rsnname) for rsnname in new_order]
reordered_bandrsncontrib = bandrsncontrib[:, new_order_idx]
ax = sns.heatmap(reordered_bandrsncontrib, cmap=cmap_seq, yticklabels=bands,
vmin=np.min(reordered_bandrsncontrib),
vmax=np.max(reordered_bandrsncontrib), linewidth=0.5,
xticklabels=new_order)
ax.axes.set_title('percent dominance for intrinsic nerworks')
ax.figure.set_figwidth(7)
ax.figure.set_figheight(6)
plt.tight_layout()
####################################
# maximum contributing band
####################################
# identify maximum contributing band for each node
nnode = len(percentDominance_adj)
maxContrib = []
for node in range(nnode):
maxContrib.append(bands[np.argmax(percentDominance_adj[node, :])])
maxContribidx = np.zeros((nnode, 1))
for n, band in enumerate(bands):
idx = np.where(np.array(maxContrib) == band)[0]
maxContribidx[idx] = n
# plot on brain surface
toplot = maxContribidx
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
colormap='viridis', customcmap=categ_cmap,
colorbartitle=('max contribution'),
surf='inflated', vmin=0, vmax=5)
####################################
# band-specific contribution
####################################
# plot band contribution on surface
for n, band in enumerate(bands):
toplot = percentDominance_adj[:, n]
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap2,
colorbartitle=band,
surf='inflated')
####################################
# linear regression: distance-dependant CV
####################################
# regional model with distance dependent cross-validation
train_corr, test_corr = fcn_megfmri.regional_lreg_cv(megdata=avgFCmeg,
fmridata=avgFCmri,
distance=distance,
coor=coor,
correct_dist=False,
train_pct=0.75,
verbose=True)
# plot on brain surface
toplot = test_corr
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap,
colorbartitle='training set - Pearson r',
surf='inflated')
# correlate and plot
x = train_corr
y = test_corr
corr = stats.spearmanr(x, y)
pvalspin = fcn_megfmri.get_spinp(x, y, corrval=corr[0], nspin=10000,
corrtype='spearman',
lhannot=lhlabels, rhannot=rhlabels)
title = 'Spearman r = %1.3f - p (spin) = %1.4f' % (corr[0], pvalspin)
xlab = 'Pearson r - training set'
ylab = 'Pearson r - test set'
plt.figure()
fcn_megfmri.scatterregplot(x, y, title, xlab, ylab, 60)
####################################
# linear regression: subj-level (leave-one-subject out)
####################################
# subject data are not included
subjFCmri = np.load('../../data/subjFCmri_schaefer400.npy')
subjFCmeg = np.load('../../data/subjFCmeg_aec_orth_schaefer400.npy')
train_corr, test_corr = fcn_megfmri.regional_lreg_subj(subjmegdata=subjFCmeg,
subjfmridata=subjFCmri,
distance=distance,
correct_dist=False,
verbose=True)
# plot on brain surface
toplot = test_corr
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap,
colorbartitle='training set - Pearson r',
surf='inflated')
# correlate and plot
x = train_corr
y = test_corr
corr = stats.spearmanr(x, y)
pvalspin = fcn_megfmri.get_spinp(x, y, corrval=corr[0], nspin=10000,
corrtype='spearman',
lhannot=lhlabels, rhannot=rhlabels)
title = 'Spearman r = %1.3f - p (spin) = %1.4f' % (corr[0], pvalspin)
xlab = 'Pearson r - training set'
ylab = 'Pearson r - test set'
plt.figure()
fcn_megfmri.scatterregplot(x, y, title, xlab, ylab, 60)
####################################
# ctf metric
####################################
ctf = np.load('../../data/avg_peak_err_ctf_resolutionMatrix_lcmv_Schaefer400.npy')
# correlate and plot
x = ctf
y = rsq
corr = stats.spearmanr(x, y)
pvalspin = fcn_megfmri.get_spinp(x, y, corrval=corr[0], nspin=10000,
corrtype='spearman',
lhannot=lhlabels, rhannot=rhlabels)
title = 'spearman r = %1.3f - p (spin) = %1.4f' % (corr[0], pvalspin)
xlab = 'localization error'
ylab = 'R-squared'
plt.figure()
fcn_megfmri.scatterregplot(x, y, title, xlab, ylab, 60)
# plot on brain surface
toplot = ctf
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap,
colorbartitle='localization error',
surf='inflated')
####################################
# BigBrain profile intensity
####################################
profileIntensity = np.load('../../data/profileIntensity_schaefer400.npy')
corr_rsq_intensity = np.zeros((1, 50))
pvalspin_rsq_intensity = np.zeros((1, 50))
for surf in range(50):
x = stats.zscore(profileIntensity[surf, :])
y = rsq
corr = stats.spearmanr(x, y)
pvalspin = fcn_megfmri.get_spinp(x, y, corrval=corr[0], nspin=10000,
corrtype='spearman',
lhannot=lhlabels, rhannot=rhlabels)
corr_rsq_intensity[0, surf] = corr[0]
pvalspin_rsq_intensity[0, surf] = pvalspin
multicomp = multitest.multipletests(pvalspin_rsq_intensity.squeeze(),
alpha=0.05, method='fdr_bh')
pointsize = np.zeros((50, 1))
pointsize = np.squeeze(pointsize)
pointsize[multicomp[1] > 0.05] = 1
fig, ax = plt.subplots(1, 1)
ax = sns.scatterplot(np.arange(50), corr_rsq_intensity.flatten(),
hue=corr_rsq_intensity.flatten(),
palette=cmap_seq_r, size=pointsize)
plt.xlabel('cortical depth')
plt.ylabel('Spearman rho - rsq vs profile intensity')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.figure.set_figwidth(8)
ax.figure.set_figheight(4)
plt.tight_layout()
# brain plots
layer = 0
toplot = profileIntensity[layer, :]
brains = fcn_megfmri.plot_conte69(toplot, lhlabels, rhlabels,
vmin=np.percentile(toplot, 2.5),
vmax=np.percentile(toplot, 97.5),
colormap='viridis', customcmap=megcmap,
colorbartitle=('Profile Intensity, layer %s'
% (layer)),
surf='inflated')
####################################
# linear regression: band specific
####################################
# regional model
rsq, full_pred_fc, corrVal = fcn_megfmri.regional_lreg(megdata=avgFCmeg,
fmridata=avgFCmri,
distance=distance,
correct_dist=False,
adjusted_rsq=True)
rsq_band = pd.DataFrame(rsq, columns=['multiband'])
# -*- coding:utf-8 -*-
"""
Fundamental data interface
Created on 2018/09/07
@author: <NAME>
@contact: <EMAIL>
"""
from aushare.stock import cons as ct
import urllib.request
import json
from bs4 import BeautifulSoup
import pandas as pd
from io import StringIO
import csv
import html.parser
import time
from datetime import datetime
import os
import re
def getAllASXListCode():
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
return df['ASX code']
def getASXListName(code=None):
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
if (code ==None):
print(df['Company name'])
return df['Company name']
else:
print(df['Company name'][df['ASX code']==code])
return (df['Company name'][df['ASX code']==code])
def getASXListIndustry(code=None):
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
if (code ==None):
return df['GICS industry group']
else:
return df['GICS industry group'][df['ASX code']==code]
def getCompanyBasicInfo(code=None):
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
if (code ==None):
print(df['GICS industry group','Company name'])
return df['GICS industry group','Company name']
else:
print(df[['GICS industry group','Company name']][df['ASX code']==code])
return df[['GICS industry group','Company name']][df['ASX code']==code]
#get income from annual report in yahoo finance
def getRevenueDiff(code='APT'):
file_name = ct.REVENUE_FILE%code
try:
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0)
else:
urlbase = ct.INCOME_ANNUAL_REPORT
url = urlbase%(code,code)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
#tb = soup.find("table",attrs = {"data-reactid":"29"})
tb = soup.find("table")
df1 = pd.read_html(str(tb),header=0,index_col=0)
df =df1[0].T
df2 = pd.to_numeric(df["Total Revenue"],downcast='float')
df3 = pd.DataFrame(data=df2.pct_change(periods=-1)).reset_index()
df3.columns=['date','difference']
except:
print('No revenue report found for the code %s'%code)
df3 =None
return df3
def getRevenue(code='APT'):
file_name = ct.REVENUE_FILE%code
try:
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0)
else:
urlbase = ct.INCOME_ANNUAL_REPORT
url = urlbase%(code,code)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
#tb = soup.find("table",attrs = {"data-reactid":"29"})
tb = soup.find("table")
df1 = pd.read_html(str(tb),header=0,index_col=0)
df =df1[0].T
except:
print('No revenue report found for the code %s'%code)
df =None
print(df)
return df
def getWeeklyPrice(code='APT',Year='2016'):
file_name = ct.WEEKLY_PRICE_FILE%code
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0,date_parser=_parser,skipfooter =1,engine='python')
df.reset_index(inplace =True)
df1 = df[df['Date'].dt.year ==int(Year)]
# df1 = df[df['Date'].str[:4] == Year]
# if df1.empty or df1 is None:
# print('empty')
# df2 = df[df['Date'].str[-4:] == Year]
# return df2
# else:
# return df1
return df1
try:
urlbase = ct.WEEKLY_PRICE
s = "01/01/{}".format(Year)
period1= int(time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple()))
s = "31/12/{}".format(Year)
period2= int(time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple()))
code = code
url = urlbase%(code,period1,period2)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
tb = soup.find("table",attrs = {"data-test":"historical-prices"})
na_values = ['NaN', 'N/A', '-']
df1 = pd.read_html(str(tb),header=0,index_col=0,na_values=na_values)
return df1[0]
except:
print('No weekly price found for the code ')
return None
def getMeanPriceDiffPercentage(code,startYear,endYear):
df1 = getWeeklyPrice(code,startYear)
if df1 is None or df1.empty:
return None
try:
meanprice_year1 = df1['Adj. close**'].mean()
except:
meanprice_year1 = df1['Adj Close'].mean()
print(meanprice_year1)
df2 = getWeeklyPrice(code,endYear)
    if df2 is None or df2.empty:
return None
try:
meanprice_year2 = df2['Adj. close**'].mean()
except:
        meanprice_year2 = df2['Adj Close'].mean()
print(meanprice_year2)
return 0 if meanprice_year1==0 else round(meanprice_year2-meanprice_year1,4)*100/meanprice_year1
#get mean price of certain year
def getYearMeanPrice(code,Year):
df1 = getWeeklyPrice(code,Year)
    if df1 is None or df1.empty:
return None
try:
meanprice_year1 = df1['Adj. close**'].mean()
except:
meanprice_year1 = df1['Adj Close'].mean()
print(meanprice_year1)
return round(meanprice_year1,2)
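# Illustrative usage sketch (not part of the module): 'APT' is simply the default
# ticker used by the other helpers here; any listed ASX code should work.
def _demo_price_summary(code='APT'):
    mean_2016 = getYearMeanPrice(code, '2016')
    pct_change = getMeanPriceDiffPercentage(code, '2016', '2017')
    print('%s mean price in 2016: %s, 2016->2017 change: %s%%' % (code, mean_2016, pct_change))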
#get balance sheet from annual report in yahoo finance
def getBalanceSheet(code='APT'):
file_name = ct.BALANCE_SHEET_FILE%code
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0)
return df
try:
urlbase = ct.BALANCE_SHEET_ANNUAL_REPORT
url = urlbase%(code,code)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
tb = soup.find("table")
df1 = pd.read_html(str(tb),header=0,index_col=0)
df =df1[0].T
# df2 = pd.to_numeric(df["Total Revenue"],downcast='float')
# df3 = pd.DataFrame(data=df2.pct_change(periods=-1)).reset_index()
# df3.columns=['date','difference']
except:
print('No balance sheet found for the code %s'%code)
df =None
return df
#get cash flow from annual report in yahoo finance
def getCashflow(code='APT'):
file_name = ct.CASHFLOW_FILE%code
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0)
return df
try:
urlbase = ct.CASH_FLOW_ANNUAL_REPORT
url = urlbase%(code,code)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
tb = soup.find("table")
df1 = pd.read_html(str(tb),header=0,index_col=0)
df =df1[0].T
except:
print('No cash flow found for the code %s'%code)
df =None
return df
def getPerShareInfo(code = 'APT'):
urlbase = ct.PERSHARE_FINANCIAL_INFO
company_name = getASXListName(code).iloc[0].strip('.').replace(' ','-')
try:
url = urlbase%(code,company_name)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
keyword = re.compile(code+' '+'Per Share',re.I)
tb = soup.find(text=keyword).parent.parent.find("table")
df1 = pd.read_html(str(tb),header=0,index_col=0)
df =df1[0]
df.fillna('0',inplace=True)
#df['Sales']=df['Sales'].str.replace('$', '')
print(df['Sales'])
df['Sales']=df['Sales'].str.extract('(-?\d+\.*\d+)').astype(float)
df['Cashflow']=df['Cashflow'].str.extract('(^-?\d+\.*\d+)').astype(float)/100
df['Earnings']=df['Earnings'].str.extract('(^-?\d+\.*\d+)').astype(float)/100
df['Dividends']=df['Dividends'].str.extract('(^-?\d+\.*\d+)').astype(float)/100
df['Book Value']=df['Book Value'].str.extract('(^-?\d+\.*\d+)').astype(float)/100
except:
print('cannot find per share info')
df =None
print(df)
return df
def getStatistics(code = 'APT'):
urlbase = ct.STATISTICS_INFO
url = urlbase%(code,code)
print(url)
try:
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
#keyword = re.compile(code+' '+'Per Share',re.I)
keyword ='Valuation measures'
tb = soup.find(text=keyword).parent.parent.next_sibling()[0].find_all("table")
df1 = pd.read_html(str(tb),index_col=0)
df =df1[0].T
df.fillna('0',inplace=True)
print (df['Market cap (intra-day) 5'].iloc[0])
return (df)
except:
print('cannot find STATISTICS_INFO for symbol %s'%code)
return None
def _parser(date):
try:
return pd.datetime.strptime(date, '%d %b %Y')
except:
try:
            return pd.datetime.strptime(date, '%d/%m/%Y')
from numpy import loadtxt
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, f1_score
from sklearn import metrics
import smote_variants as sv
from sklearn.linear_model import LogisticRegression
from sklearn import svm
import heapq
# prepare the smote data --> X_samp, y_samp
df = | pd.read_csv("original_data.csv") | pandas.read_csv |
'''
Data sources
-------------------------------------------------------------------------------
Confirmed cases and deaths:
Accessed from https://github.com/CSSEGISandData/COVID-19
Testing and populations:
Accessed from https://covidtracking.com/
- Population is calculated as the sum of populations for all locales
within a state or territory.
- Test positivity is computed as the ratio of reported positive tests to
reported tests, as aggregated in this source.
-------------------------------------------------------------------------------
'''
import pandas as pd
def add_state(total_tests_frame, pos_ratios_frame, name, abbr):
'''
Adds data for a single state or territory to a table accumulating all state
data. Fetches a .json file, then separates out total tests over time and
computed positivity ratio over time, and adds these columns to the
respective DataFrames passed in.
Params
DataFrame `total_tests_frame`: table of combined per-state time series
DataFrame `pos_ratios_frame`: table of combined per-state time series
string `name`: column name for state/territory
string `abbr`: 2-letter abbreviation for state/territory
Returns
DataFrame `total_tests_frame`: copy of input with additional column added
DataFrame `pos_ratios_frame`: copy of input with additional column added
'''
data_url = f"https://covidtracking.com/api/v1/states/%s/daily.json" % abbr.lower()
    data = pd.read_json(data_url)
"""
library for simulating semi-analytic mock maps of CMB secondary anisotropies
"""
__author__ = ["<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>"]
import os
import warnings
from sys import getsizeof
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from warnings import warn
import inspect
from itertools import product
import operator
import re
from functools import partial
from tqdm.auto import tqdm
from astropaint.lib.log import CMBAlreadyAdded, NoiseAlreadyAdded
try:
import healpy as hp
except ModuleNotFoundError:
warn("Healpy is not installed. You cannot use the full sky canvas without it.")
from astropy.coordinates import cartesian_to_spherical
from .lib import transform, utils
# find the package path; same as __path__
path_dir = os.path.dirname(os.path.abspath(__file__))
#########################################################
# Halo Catalog Object
#########################################################
class Catalog:
"""halo catalog containing halo masses, locations, velocities, and redshifts
Units
-----
x, y, z: [Mpc]
v_x, v_y, v_z: [km/s]
M_200c: [M_sun]
"""
def __init__(self,
data=None,
calculate_redshifts=False,
default_redshift=0,
):
"""
Parameters
----------
data: dataframe or str
Input data can be either a pandas dataframe or any table with the
following columns:
["x", "y", "z", "v_x", "v_y", "M_200c"]
Alternatively data can be set to a string indicating the name of
a halo catalog to be loaded. There are various options for the input
string:
"random box" and "random shell" (case insensitive) respectively call
.generate_random_box() and .generate_random_shell() methods with the
default arguments.
"test" generates 6 test halos in the positive and negative x, y, z
directions. This is useful for testing and building prototypes.
Any other string will be looked up as the name of a csv file under
astropaint/data/
e.g. "websky", "MICE", or "Sehgal"
calculate_redshifts: bool
if True, redshifts of objects will be calculated from the comoving
distance according to the latest Planck cosmology (astropy.cosmo.Planck18_arXiv_v2)
This can be numerically expensive for large catalogs so if your
catalog already comes with redshifts, set this to False to save time.
default_redshift: float
If calculate_redshift is set to False, this value will be used as the
default redshift for all the halos.
"""
#TODO: define attribute dictionary with __slots__
self._build_counter = 0
self.calculate_redshifts = calculate_redshifts
# if calculate_redshifts==False, assume this redshift for everything
self.default_redshift = default_redshift
# if no input is provided generate a random catalog
if data is None:
self.data = self._initialize_catalog(1)
#self.generate_random_box()
elif isinstance(data, str):
if re.match(".*random.*box", data, re.IGNORECASE):
self.generate_random_box()
elif re.match(".*random.*shell", data, re.IGNORECASE):
self.generate_random_shell()
elif re.match(".*test.*", data, re.IGNORECASE):
self.generate_test_box(configuration=["all"])
else:
self.load_from_csv(data)
else:
#FIXME: check data type and columns
self.data = data
# .................
# octant signatures
# .................
# (x,y,z) signatures for each octant e.g. (+,+,+) , (+,+,-) etc.
self.octant_signature = self._get_octant_signatures(mode="user")
# same thing but for use in calculations
self._octant_shift_signature = self._get_octant_signatures(mode="shift")
self._octant_mirror_signature = self._get_octant_signatures(mode="mirror")
self._octant_rotate_signature = self._get_octant_signatures(mode="rotate")
# TODO: check input type/columns/etc
# ------------------------
# properties
# ------------------------
@property
def data(self):
return self._data
@data.setter
def data(self, val):
self._data = val
self._data = pd.DataFrame(self.data).reset_index(drop=True)
self.size = len(self._data)
self.box_size = self._get_box_size()
if self._build_counter>0:
print("Catalog data has been modified...\n")
# build the complete data frame
# e.g. angular distances, radii, etc.
self.build_dataframe(calculate_redshifts=self.calculate_redshifts,
default_redshift=self.default_redshift)
# ------------------------
# sample data
# ------------------------
#TODO: support inputs other than csv
def load_from_csv(self, sample_name="MICE"):
"""load sample data using the name of dataset"""
if not sample_name.endswith(".csv"):
sample_name += ".csv"
fname = os.path.join(path_dir, "data", f"{sample_name}")
print(f"Catalog loaded from:\n{fname}")
self.data = pd.read_csv(fname, index_col=0)
def save_to_csv(self, sample_name):
"""load sample data using the name of dataset"""
if not sample_name.endswith(".csv"):
sample_name += ".csv"
fname = os.path.join(path_dir, "data", f"{sample_name}")
self.data.to_csv(fname)
print(f"Catalog saved to:\n{fname}")
def generate_random_box(self,
box_size=50,
v_max=100,
mass_min=1E14,
mass_max=1E15,
n_tot=50000,
put_on_shell=False,
inplace=True,
):
catalog = self._initialize_catalog(n_tot)
print("generating random catalog...\n")
# generate random positions
x, y, z = np.random.uniform(low=-box_size/2,
high=box_size/2,
size=(3, n_tot))
if put_on_shell:
(x, y, z) = box_size * np.true_divide((x, y, z), np.linalg.norm((x, y, z), axis=0))
catalog["x"], catalog["y"], catalog["z"] = x, y, z
# generate random velocities
v_x, v_y, v_z = np.random.uniform(low=-v_max,
high=v_max,
size=(3, n_tot))
catalog["v_x"], catalog["v_y"], catalog["v_z"] = v_x, v_y, v_z
# generate random log uniform masses
catalog["M_200c"] = np.exp(np.random.uniform(low=np.log(mass_min),
high=np.log(mass_max),
size=n_tot))
if inplace:
self.data = pd.DataFrame(catalog)
else:
return pd.DataFrame(catalog) # convert catalog to pandas data frame
def generate_random_shell(self,
shell_radius=50,
v_max=100,
mass_min=1E14,
mass_max=1E15,
n_tot=50000,
inplace=True,
):
catalog = self._initialize_catalog(n_tot)
print("generating random catalog...\n")
# generate random points according to http://mathworld.wolfram.com/SpherePointPicking.html
u,v = np.random.uniform(low=0,
high=1,
size=(2, n_tot))
phi = 2 * np.pi * u
theta = np.arccos(2 * v -1)
# (x, y, z) = box_size * np.true_divide((x, y, z), np.linalg.norm((x, y, z), axis=0))
catalog["x"], catalog["y"], catalog["z"] = np.sin(theta) * np.cos(phi),\
np.sin(theta) * np.sin(phi),\
np.cos(theta)
catalog[["x", "y", "z"]] *= shell_radius
# generate random velocities
v_x, v_y, v_z = np.random.uniform(low=-v_max,
high=v_max,
size=(3, n_tot))
catalog["v_x"], catalog["v_y"], catalog["v_z"] = v_x, v_y, v_z
# generate random log uniform masses
catalog["M_200c"] = np.exp(np.random.uniform(low=np.log(mass_min),
high=np.log(mass_max),
size=n_tot))
if inplace:
self.data = pd.DataFrame(catalog)
else:
return pd.DataFrame(catalog) # convert catalog to pandas data frame
def generate_test_box(self,
configuration=["all"],
distance=100,
mass=1E15,
inplace=True,
):
catalog = pd.DataFrame(self._initialize_catalog(0))
config_dict = {"front": (1, 0, 0),
"back": (-1, 0, 0),
"left": (0, 1, 0),
"right": (0, -1, 0),
"top": (0, 0, 1),
"bottom": (0, 0, -1),
}
# set configuration for "all" keyword
if "all" in configuration:
configuration = config_dict.keys()
for key in configuration:
# get the coordinates from config_dic and load it in a dataframe
x, y, z = config_dict[key]
df = pd.DataFrame(Catalog._initialize_catalog(1))
df["x"], df["y"], df["z"] = x, y, z
df[["x", "y", "z"]] *= distance
# set the mass
df["M_200c"] = mass
# append the test case to the catalog
catalog = catalog.append(df, ignore_index=True)
if inplace:
self.data = pd.DataFrame(catalog)
else:
return pd.DataFrame(catalog) # return the pandas dataframe
# ------------------------
# methods
# ------------------------
def build_dataframe(self,
calculate_redshifts=False,
default_redshift=0):
#TODO: add units documentation to the catalog for reference
self._build_counter = 1
print("Building the dataframe and updating all the parameters...\n")
# calculate the comoving distance and angular position (theta and phi in radians)
self.data['D_c'], self.data['lat'], self.data['lon'] = cartesian_to_spherical(
self.data['x'].values,
self.data['y'].values,
self.data['z'].values)
if calculate_redshifts:
tqdm.pandas(desc="calculating redshifts")
self.data['redshift'] = self.data['D_c'].progress_apply(transform.D_c_to_redshift)
else:
try:
self.data['redshift']
except KeyError:
self.data['redshift'] = pd.Series([default_redshift]*len(self.data['D_c']))
# theta = pi/2 - lat , phi = lon
self.data['theta'] = np.pi / 2 - self.data['lat']
self.data['phi'] = self.data['lon']
# convert lonlat coords to deg
self.data['lon'], self.data['lat'] = np.rad2deg((self.data['lon'], self.data['lat']))
# calculate angular diameter distance, virial radius and angular size
self.data['D_a'] = transform.D_c_to_D_a(self.data['D_c'], self.data['redshift'])
self.data['R_200c'] = transform.M_200c_to_R_200c(self.data['M_200c'], self.data['redshift'])
self.data['c_200c'] = transform.M_200c_to_c_200c(self.data['M_200c'], self.data['redshift'])
self.data['R_ang_200c'] = transform.radius_to_angsize(self.data['R_200c'],
self.data['D_a'], arcmin=True)
#TODO: change redshift to nonuniversal value
self.data['rho_s'] = transform.M_200c_to_rho_s(self.data['M_200c'],
self.data['redshift'],
self.data['R_200c'],
self.data['c_200c'])
self.data['R_s'] = np.true_divide(self.data['R_200c'], self.data['c_200c'])
# find the cartesian to spherical coords transformation matrix
J_cart2sph = transform.get_cart2sph_jacobian(self.data['theta'].values,
self.data['phi'].values)
# J_sph2cart = transform.sph2cart(self.data['co-lat'].values,self.data['lon'].values)
# transform the velocity field and define v_r (radial), v_th (co-latitude), v_ph (longitude)
v_cart = np.array([self.data['v_x'], self.data['v_y'], self.data['v_z']])
self.data['v_r'], self.data['v_th'], self.data['v_ph'] = np.einsum('ij...,i...->j...',
J_cart2sph, v_cart)
self.data['v_lat'] = -self.data['v_th']
self.data['v_lon'] = self.data['v_ph']
print("Done!")
def _get_box_size(self):
"""find the catalog box size from x, y, z coordinates"""
Lx = self.data["x"].max() - self.data["x"].min()
Ly = self.data["y"].max() - self.data["y"].min()
Lz = self.data["z"].max() - self.data["z"].min()
return Lx, Ly, Lz
@staticmethod
def _get_octant_signatures(mode="user"):
"""calculate the octant signatures to be used in replication function later"""
# set up the octant signature with +1, -1 indicating the sign of each axis
# e.g. (+1,+1,+1) is the first octant/ (-1,+1,+1) is the second octant etc.
x_signs = np.sign(np.exp(1.j * (np.arange(8) * np.pi / 2 + np.pi / 4)).real).astype(int)
y_signs = np.sign(np.exp(1.j * (np.arange(8) * np.pi / 2 + np.pi / 4)).imag).astype(int)
z_signs = np.array(4 * [1] + 4 * [-1])
# put them together as a reference dictionary
oct_sign_dict = dict(enumerate(zip(x_signs, y_signs, z_signs)))
if mode == "user":
sign_dict = {1: "+",
-1: "-"}
# (x,y,z) signatures for each octant e.g. (+,+,+) , (+,+,-) etc.
octant_signature = [(sign_dict[i], sign_dict[j], sign_dict[k])
for (i, j, k) in oct_sign_dict.values()]
elif mode == "shift":
sign_dict = {1 : 0,
-1: -1}
octant_signature = [(sign_dict[i], sign_dict[j], sign_dict[k])
for (i, j, k) in oct_sign_dict.values()]
elif mode == "mirror":
octant_signature = product((+1, -1), repeat=3)
elif mode == "rotate":
# octant signature for replication by rotation
octant_signature = sorted(product([0, 1, 2, 3], [1, -1]),
key=operator.itemgetter(1),
reverse=True)
else:
raise KeyError("octant signature mode not defined")
octant_signature = dict(enumerate(octant_signature))
return octant_signature
@staticmethod
def _initialize_catalog(n_tot):
"""initialize an empty catalog with x, y, z, v_x, v_y, v_z, M_200c columns"""
dtype = {"names": ["x", "y", "z", "v_x", "v_y", "v_z", "M_200c"],
"formats": 7 * [np.float32]}
catalog = np.zeros(n_tot, dtype)
return | pd.DataFrame(catalog) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 13:56:10 2018
@author: dugj2403
"""
import os
from glob import glob
import pandas as pd
import numpy as np
from scipy.interpolate import griddata
from copy import deepcopy
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pandas import DataFrame
from mpl_toolkits.mplot3d import Axes3D
def rename_axis(df,axis_in,axis_out):
"""
Replace axis_in[i] with axis_out[i]
"""
df=deepcopy(df)
for count, column in enumerate(axis_in):
df.rename(columns={column:axis_out[count]}, inplace=True)
return df
def get_PIV_files(path,**keyword_parameters):
"""Recursively look in directory specified as path and its subdirectories
for PIV .dat files. All '.dat' files are found by looking recursively
in the path, these are returned in the 'full_list'. The treatment
Args:
path (str): Directory to look in.
treatment (str): keyword used in file name to identify treatment
Returns:
treatment_1 (list): containing file paths for treatment 1
treatment_2 (list): containing file paths for treatment identified by "treatment" keyword
"""
result = [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.dat'))]
return_list=[]
for i in result:
if ('treatment' in keyword_parameters):
treatment=keyword_parameters['treatment']
if treatment in i:
return_list.append(i)
else:
return_list.append(i)
return return_list
def load_PIV_dataframes(file_list,plane_labels,shot_labels,fieldNames,fieldKeys,**keyword_parameters):
"""Load data contained in .dat into pandas dataframes, neatly organized in a
heirachical dictionary. Must contain 'x', 'y' and 'scalar' data.
Your scalar field should be exported in DaVis as a Tecplot .dat file. Planes
are physically different locations of the laser light sheet. Shots are repetitions
at the same plane. If you only have 1 plane and 1 shot, name your file something
like 'Plane_1_shot_1' anyway, and then use these to populate the plane_labels
and shot_labels lists.
Args:
file_list [list]: list from find_PIV_files() containing file paths
Plane_labels [list]: strings of keywords used in file path to identify 'planes'
shot_labels [list]: strings of keywords used in file path to identify 'shots'
fieldKeys [list]: strings of keywords used in file path to identify each scalar field (e.g. "B0001" - crappy DaVis output)
fieldNames [list]: strings of keywords corresponding to fieldKeys ("B0001" corresponds to 'u')
Returns:
shot_dic {dict}: two-level dictionary. shot_labels are outer keys and fieldNames are inner keys.
each inner key is attributed a dataframe with corresponding dataframe containing scalar data
"""
print(shot_labels)
plane_dic={}
for file in file_list:
for plane in plane_labels:
if plane in file:
if plane not in plane_dic:
plane_dic[plane] = {}
for shot in shot_labels:
if shot in file:
if shot not in plane_dic[plane]:
plane_dic[plane][shot] = {}
for index, fieldKey in enumerate(fieldKeys):
if fieldNames[index] not in plane_dic[plane][shot]:
plane_dic[plane][shot][fieldNames[index]]={}
if fieldKey in file:
df=pd.read_table(file,skiprows=[0,1,2],delim_whitespace=True,names=['x','y',fieldNames[index],'valid'])
print(shot,fieldKey,index,fieldNames[index])
plane_dic[plane][shot][fieldNames[index]]=df
if ('rename_axis' in keyword_parameters):
if keyword_parameters['rename_axis']==True:
axis_in=keyword_parameters['axis_in']
axis_out=keyword_parameters['axis_out']
for plane in plane_dic:
for shot in plane_dic[plane]:
for frame in plane_dic[plane][shot]:
plane_dic[plane][shot][frame]=rename_axis(plane_dic[plane][shot][frame],axis_in,axis_out)
return plane_dic
def process_piv_dictionary(dictionary,scalar_name,plane_labels,plane_positions,**keyword_parameters):
print("\n")
print("Processing dictionary containing PIV planes and shots:")
return_dict={}
for counter, plane in enumerate(plane_labels):
print("\n")
print("Processing %s" %plane)
if 'average' in keyword_parameters:
if keyword_parameters['average']:
axis=keyword_parameters['axis']
if 'prefix' in keyword_parameters:
prefix=keyword_parameters['prefix']
df=average_PIV_field(dictionary[plane],axis,scalar_name,prefix=prefix)
else:
df=average_PIV_field(dictionary[plane],axis,scalar_name)
if 'geoRef' in keyword_parameters:
if keyword_parameters['geoRef']:
geoRefCoords=keyword_parameters['geoRefCoords']
#get current geoRefCoord
geoRefCoord=[0,0,0]
for i, axis in enumerate(geoRefCoords):
if type(axis) is int:
geoRefCoord[i]=axis
if type(axis) is list:
geoRefCoord[i]=axis[counter]
if type(axis) is float:
geoRefCoord[i]=axis
print("Georeferencing %s with %s" %(plane,str(geoRefCoord)))
try:
#only if 'average'
                    df=georeference(df,geoRefCoord)
except NameError:
single_shot_ID=list(dictionary[plane].keys())[0]
df=georeference(dictionary[plane][single_shot_ID][scalar_name],geoRefCoord)
if 'crop' in keyword_parameters:
if keyword_parameters['crop']:
try:
limits_x=keyword_parameters['limits_x']
except KeyError:
pass
try:
limits_y=keyword_parameters['limits_y']
except KeyError:
pass
try:
limits_z=keyword_parameters['limits_z']
except KeyError:
pass
if 'limits_x' in keyword_parameters:
if 'limits_y' in keyword_parameters:
print('Cropping %s along x and y' % plane)
df=crop_scalar_field(df,limits_x=limits_x,limits_y=limits_y)
if 'limits_x' in keyword_parameters:
if 'limits_z' in keyword_parameters:
print('Cropping %s along x and z' % plane)
df=crop_scalar_field(df,limits_x=limits_x,limits_z=limits_z)
if 'limits_y' in keyword_parameters:
if 'limits_z' in keyword_parameters:
print('Cropping %s along y and z' % plane)
df=crop_scalar_field(df,limits_y=limits_y,limits_z=limits_z)
return_dict.update({plane : df})
del df
del keyword_parameters
return return_dict
def average_PIV_field(dictionary,axis,field, **keyword_parameters):
"""Average 2D scalar values across multiple 'shots', where a 'shot' represents
a sequence of PIV images lasting a duration of time. Multiple shots make up a PIV
measurement of plane. Example, five 20 s shots make a total of 100 s of PIV. This function
takes the average of each of the five 20 s shots.
Args:
dictionary (str): heirachical dictionary containing 'shot_xx' entries as the master key
each scalar field for that shot is contained within a subdictionary as
a dataframe that can be accessed with a keyword (e.g. "u" or "TKE")
field (str): keyword corresponding to both subdictionary key and header of dataframe
Returns:
df (dataframe): containing data from all shots and the averaged scalar field under
the header "mean_xx", where xx = field
"""
df=pd.DataFrame()
for shot in dictionary:
print("Including %s for averaging" %shot )
df[shot+'_'+field]=dictionary[shot][field][field]
if ('prefix' in keyword_parameters):
prefix=keyword_parameters['prefix']
df[prefix+field] = df.mean(axis=1)
name=prefix+field
else:
df[field] = df.mean(axis=1)
name=field
df[axis[0]]=dictionary[shot][field][axis[0]]
df[axis[1]]=dictionary[shot][field][axis[1]]
df=df[[axis[0],axis[1],name]]
if ('output' in keyword_parameters):
output = keyword_parameters['output']
if ('path' in keyword_parameters):
path = keyword_parameters['path']
if ('prefix' in keyword_parameters):
prefix = keyword_parameters['prefix']
if output:
try:
path=path + "\\" + "avg_%s_%s.csv" % (field, prefix)
except UnboundLocalError:
path=path + "\\" + "avg_%s.csv" % field
df.to_csv(path, columns=[axis[0],axis[1],name],index=False)
return df
def flip_horizontally(df,axis_name):
"""
Flips x axis of PIV dataframe to fit within a correct coordinate convention.
Args:
df (dataframe): dataframe to be flipped
axis_name (string): key of dataframe column to flip
"""
df=deepcopy(df)
df[axis_name]=df[axis_name]*(-1)
return df
def georeference(df,geoRef):
df=deepcopy(df)
try:
df['x'] = df['x']+geoRef[0]
except KeyError:
df['x'] = geoRef[0]
try:
df['y'] = df['y']+geoRef[1]
except KeyError:
df['y'] = geoRef[1]
try:
df['z'] = df['z']+geoRef[2]
except KeyError:
df['z'] = geoRef[2]
return df
def inverse_scalar(df,scalar):
df=deepcopy(df)
df[scalar]=df[scalar]*-1
return df
def smooth(df,columns,names,period,**keyword_parameters):
"""Smooths out spikey data in a 3D trajectory by running a moving average
over specified columns of input dataframe. Optionally, the smoothed curve
can be shifted back close to its originally position by including the shift
optional argument.
Args:
df (dataframe): pandas dataframe
columns (list): list of dataframe headers to smooth
names (list): names of new columns in same order as columns
Returns:
dataframe: pandas dataframe containing smoothed and/or shifted columns.
"""
df=deepcopy(df)
for i in range(len(columns)):
df[names[i]]=df[columns[i]].rolling(period).mean()
if ('shift' in keyword_parameters):
shift = keyword_parameters['shift']
if shift:
shift_names=keyword_parameters['shift_names']
shift_period=keyword_parameters['shift_period']
for i in range(len(columns)):
df[shift_names[i]]=df[names[i]].shift(shift_period)
return df
def extractProfile(df,x_axis,y_axis,value):
exactmatch=df[df[x_axis]==value]
if not exactmatch.empty:
return exactmatch
else:
lowerneighbour_ind = df[df[x_axis]<value][x_axis].idxmax()
upperneighbour_ind = df[df[x_axis]>value][x_axis].idxmin()
lowValue=df.iloc[lowerneighbour_ind][x_axis]
highValue=df.iloc[upperneighbour_ind][x_axis]
print("The closest positions to %f along the specified x_axis are: %f and %f" %(value,lowValue,highValue))
print("Average values for these positions have been returned at x_axis = %f" %((lowValue+highValue)/2))
print("Reminder: 'x_axis' refers to the axis along which you want to extract the profile")
print(" 'y_axis' refers to the plottable values at the specified x_axis position")
df=df[(df[x_axis] == lowValue)|(df[x_axis] == highValue)]
df=df.groupby([y_axis]).mean().reset_index()
return df
def extract_2D_slice(dictionary,axis,position):
dictionary=deepcopy(dictionary)
slices=[]
for plane in dictionary:
dictionary[plane]=dictionary[plane].sort_values(axis,ascending=False).assign(New=(dictionary[plane][axis]-position).abs())
# dictionary[plane]=dictionary[plane].drop(dictionary[plane][dictionary[plane].New != dictionary[plane].New.min() ].index)
slices.append(dictionary[plane])
slices= | pd.concat(slices) | pandas.concat |
# MIT License
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from pyTD.auth import auth_check
from pyTD.resource import Get
logger = logging.getLogger(__name__)
class MarketData(Get):
"""
Base class for retrieving market-based information. This includes the
following endpoint groups:
- Market Hours
- Movers
- Option Chains
- Price History
- Quotes
Parameters
----------
symbols: str or list-like, optional
A symbol or list of symbols
    output_format: str, optional, default 'pandas'
Desired output format (json or Pandas DataFrame)
api: pyTD.api.api object, optional
A pyTD api object. If not passed, API requestor defaults to
pyTD.api.default_api
"""
def __init__(self, output_format='pandas', api=None):
self.output_format = output_format
super(MarketData, self).__init__(api)
@property
def endpoint(self):
return "marketdata"
@property
def resource(self):
raise NotImplementedError
@property
def url(self):
return "%s%s/%s" % (self._BASE_URL, self.endpoint, self.resource)
def _convert_output(self, out):
import pandas as pd
return | pd.DataFrame(out) | pandas.DataFrame |
import pandas as pd
from pyhive import hive
import thrift
import csv
import configparser
import logging
import subprocess
class Utility:
def __init__(self,loggername,config):
self.logger = logging.getLogger(loggername)
self.config = config
def dataConversion(self,pm_data, dir_path, flag):
try:
timestamp = self.config.get('Parameters', 'timestamp')
section_name = self.config.get('Parameters', 'section')
module_name = self.config.get('Parameters', 'devices')
val = self.config.get('Parameters', 'val')
measure = self.config.get('Parameters', 'measure')
common = self.config.get('Parameters', 'device.types')
parent_columns = self.config.get('Parameters', 'parent.columns')
actual = self.config.get('General','actual.dir')
metric_val = self.config.get('Parameters', 'metrics')
metric_list = metric_val.split(',')
sections_list = list(pm_data[section_name].unique())
for section in sections_list:
section_df = pm_data[pm_data[section_name]==section]
modules_list = common.split(',')
converted_df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from datetime import datetime, timedelta
import os
import pandas as pd
from azure.monitor.query import LogsQueryClient, LogsBatchQuery
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
client = LogsQueryClient(credential)
# [START send_query_batch]
requests = [
LogsBatchQuery(
query="AzureActivity | summarize count()",
duration=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= """AppRequests | take 10 |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
duration=timedelta(hours=1),
start_time=datetime(2021, 6, 2),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= "AppRequestss | take 5",
workspace_id= os.environ['LOG_WORKSPACE_ID'],
include_statistics=True
),
]
responses = client.query_batch(requests)
for response in responses:
try:
table = response.tables[0]
df = | pd.DataFrame(table.rows, columns=[col.name for col in table.columns]) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
notna,
)
import pandas._testing as tm
def test_expanding_corr(series):
A = series.dropna()
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(series):
result = series.expanding(min_periods=0).count()
tm.assert_almost_equal(
result, series.rolling(window=len(series), min_periods=0).count()
)
def test_expanding_quantile(series):
result = series.expanding().quantile(0.5)
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(series):
A = series
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(frame):
result = frame.expanding().cov()
rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(frame):
result = frame.expanding().corr()
rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_func(func, static_comp, frame_or_series):
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = getattr(data.expanding(min_periods=1, axis=0), func)()
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[10], static_comp(data[:11]))
else:
tm.assert_series_equal(
result.iloc[10], static_comp(data[:11]), check_names=False
)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_min_periods(func, static_comp):
ser = Series(np.random.randn(50))
result = getattr(ser.expanding(min_periods=30, axis=0), func)()
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = getattr(ser.expanding(min_periods=15, axis=0), func)()
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result0, result1)
result = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_expanding_apply(engine_and_raw, frame_or_series):
engine, raw = engine_and_raw
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = data.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[9], np.mean(data[:11]))
else:
tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
def test_expanding_min_periods_apply(engine_and_raw):
engine, raw = engine_and_raw
ser = Series(np.random.randn(50))
result = ser.expanding(min_periods=30).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
# min_periods is working correctly
result = ser.expanding(min_periods=15).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = ser2.expanding(min_periods=5).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = ser.expanding(min_periods=0).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
result1 = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result0, result1)
result = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
mean_x = x.expanding(min_periods=min_periods).mean()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_std(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if isinstance(x, Series):
var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof)
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
var_y = x.expanding(min_periods=min_periods).var(ddof=ddof)
cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.expanding(min_periods=min_periods).corr(x)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
std_y = x.expanding(min_periods=min_periods).std(ddof=ddof)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if ddof == 0:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.expanding(min_periods=min_periods).mean()
mean_y = x.expanding(min_periods=min_periods).mean()
mean_x_times_y = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_mean(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
result = x.expanding(min_periods=min_periods).mean()
expected = (
x.expanding(min_periods=min_periods).sum()
/ x.expanding(min_periods=min_periods).count()
)
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_constant(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding().count()
mean_x = x.expanding(min_periods=min_periods).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.expanding(min_periods=min_periods).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
# check variance debiasing factors
var_unbiased_x = x.expanding(min_periods=min_periods).var()
var_biased_x = x.expanding(min_periods=min_periods).var(ddof=0)
var_debiasing_factors_x = x.expanding().count() / (
x.expanding().count() - 1.0
).replace(0.0, np.nan)
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
],
)
def test_moment_functions_zero_length_pairwise(f):
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
columns=Index(["a"], name="foo"),
dtype="float64",
)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_apply_empty_series(engine_and_raw):
engine, raw = engine_and_raw
ser = Series([], dtype=np.float64)
tm.assert_series_equal(
ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
)
def test_expanding_apply_min_periods_0(engine_and_raw):
# GH 8080
engine, raw = engine_and_raw
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)
expected = Series([1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_pairwise_diff_length():
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo"))
df1a = DataFrame(
[[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo")
)
df2 = DataFrame(
[[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo")
)
df2a = DataFrame(
[[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo")
)
# TODO: xref gh-15826
# .loc is not preserving the names
result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
expected = DataFrame(
[[-3.0, -6.0], [-5.0, -10.0]],
columns=Index(["A", "B"], name="foo"),
index=Index(["X", "Y"], name="foo"),
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length():
# GH 7512
df1 = DataFrame(
[[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar")
)
df1a = DataFrame(
[[1, 2], [3, 4]], index= | Index([0, 2], name="bar") | pandas.Index |
# generates accuracy and predictions using ML classifiers
# Requires generation of simulated data from "sim_speeches.r"
import os
import cPickle as pickle
import sys
import logging
import pandas as pd
import numpy as np
import re
import string
import itertools
import os.path
import time
import scipy
from scipy.sparse import csr_matrix
from sklearn import preprocessing
from sklearn.preprocessing import maxabs_scale
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import StratifiedKFold
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
def fit_pred_offline_classifiers(X_train, y_train, X_test, y_test, X):
classifiers_balanced = {
'SGD': SGDClassifier(class_weight='balanced', n_jobs=10),
'Perceptron': Perceptron(class_weight='balanced', n_jobs=10),
'Passive-Aggressive': PassiveAggressiveClassifier(class_weight='balanced', n_jobs=10),
}
classifiers_bal_predprob = {"SAG": LogisticRegression(solver='sag', n_jobs=10, tol=1e-1, C=1.e4 / 50000 ),} # , C=1.e4 / 50000
cls_stats = {}
preds = {}
for cls_name in classifiers_bal_predprob:
stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 't0': time.time(), 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
for cls_name in classifiers_balanced:
stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 't0': time.time(), 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
tick = time.time()
for cls_name, cls in classifiers_bal_predprob.items():
print("fitting %s" % cls_name)
#logging.info("fitting %s" % cls_name)
cls.fit(X_train, y_train)#, classes=all_classes)
preds[cls_name] = cls.predict_proba(X)
# stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
tick = time.time()
for cls_name, cls in classifiers_balanced.items():
#logging.info("fitting %s" % cls_name)
cls = LogisticRegression(solver='sag', n_jobs=10, tol=1e-1, C=1.e4 / X_train.shape[0]) # put this here to get C correct
cls.fit(X_train, y_train)#, classes=all_classes)
preds[cls_name] = cls.predict(X)
# stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
return(cls_stats, preds)
#---------------------------------------
#---------------------------------------
def run_estimates(X, y):
# tick = time.time()
skf = StratifiedKFold(y, n_folds=10, shuffle=True)#, random_state=1234)
cls_stats = {}
preds= {}
foldid = 0
for train_index, test_index in skf:
#logging.info("fold: %d" % foldid)
#logging.info("TRAIN: %s" train_index)#, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cls_stats[foldid], preds[foldid] = fit_pred_offline_classifiers(X_train, y_train, X_test, y_test, X)
foldid += 1
#for _, stats in sorted(cls_stats.items()):
# accuracy, n_examples = zip(*stats['accuracy_history'])
# fit_time = time.time() - tick
return(cls_stats, preds)
#---------------------------------------
#---------------------------------------
def avergage_per_classifier(cls_stats, classifier_names):
accuracies = {}
median = {}
vs = {}
for classif in classifier_names:
accs = []
for fold, stats in cls_stats.items():
relevant = stats[classif]
accs.append(relevant['accuracy'])
accuracies[classif] = np.mean(accs)
vs[classif] = np.var(accs)
median[classif] = np.median(accs)
return(accuracies, median, vs)
#---------------------------------------
#
#---------------------------------------
def stats_from_estimates(yearly_stats, randomize, run_id):
""" """
classifier_names = ['SAG', 'SGD', 'Perceptron','Passive-Aggressive'] #classifiers.keys()
rows = []
for indx, yr in sess_indx.items()[:79]:
#logging.info(str(yr))
try:
curr = yearly_stats[indx]
mns, meds, vs = avergage_per_classifier(curr, classifier_names )
rows.append([indx, yr, mns['SAG'], mns['SGD'], mns['Perceptron'], mns['Passive-Aggressive'],
meds['SAG'], meds['SGD'], meds['Perceptron'], meds['Passive-Aggressive'],
vs['SAG'], vs['SGD'], vs['Perceptron'], vs['Passive-Aggressive'] ])
except:
logging.error("Error getting stats for: ", str(yr))
res = | pd.DataFrame(data=rows, columns = ['index', 'yrmth',
'mn_sag','mn_sgd','mn_pcpt','mn_passAgr',
'md_sag','md_sgd','md_pcpt','md_passAgr',
'var_sag','var_sgd','var_pcpt','var_passAgr' ]) | pandas.DataFrame |
"""
Tutorial: Multivariate Time Series Forecasting with LSTMs in Keras.
"""
import sys
from datetime import datetime
import keras
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sklearn
sys.path.append("/Users/tianyudu/Documents/Github/AnnEcon/k models/exchange")
import config
import containers
import methods
def parse(x):
return datetime.strptime(x, "%Y %m %d %H")
dataset = pd.read_csv(
"./data/PRSA.csv",
parse_dates=[["year", "month", "day", "hour"]],
index_col=0,
date_parser=parse)
# Drop number column, clean the data frame.
dataset = dataset.drop(columns=["No"])
dataset.columns = [
"pollution", "dew", "temp", "press",
"wnd_dir", "wnd_spd", "snow", "rain"]
dataset.index.name = "date"
dataset["pollution"].fillna(0, inplace=True)
# Drop hr=0 to hr=23 (first 24 hrs.)
dataset = dataset[24:]
dataset.to_csv("./data/pollution.csv")
# Data cleaned, create new csv file to store the new data.
# load dataset
dataset = | pd.read_csv("./data/pollution.csv", header=0, index_col=0, engine="c") | pandas.read_csv |
import argparse
import os
import logging
from netCDF4 import Dataset
import numpy as np
import pandas as pd
def nc2csv_obs_and_M(src_file_path, dst_dir):
with Dataset(src_file_path) as nc:
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
stations = nc.variables['station'][:]
date = nc.variables['date'][:]
id_list = []
for i in range(len(date)):
for j in range(37):
id = str(date[i])[:8] + '_' + '{:02d}'.format(j)
id_list.append(id)
ID = pd.Series(data=id_list, name='Time')
for i in range(len(stations)):
csv = pd.concat([ID], axis=1)
for var in ['t2m_obs', 'rh2m_obs', 'w10m_obs', 't2m_M', 'rh2m_M', 'w10m_M']:
var_arr = np.array(nc.variables[var][:])
var_arr = np.squeeze(var_arr[:, :, i].reshape(-1, 1))
var_arr[var_arr < -8000] = np.NaN
csv[var] = var_arr
csv.to_csv(os.path.join(dst_dir,str(stations[i]) + '.csv'), index=False)
print(stations[i],' done!')
def nc2csv_merge_pre_and_next(src_file_path,str_lastday,dst_dir):
with Dataset(src_file_path) as nc:
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
stations = nc.variables['station'][:]
date = nc.variables['date'][:]
id_list = []
for i in range(len(date)):
for j in range(24):
id = str(date[i])[:8] + ' ' + '{:02d}'.format(j)
id_list.append(id)
for j in range(13):
id = str_lastday + ' ' + '{:02d}'.format(j)
id_list.append(id)
Time = pd.to_datetime(id_list)
ID = pd.Series(data=Time, name='Time')
for i in range(len(stations)):
csv = | pd.concat([ID], axis=1) | pandas.concat |
# Data files are too large to include. Download from Kaggle: https://www.kaggle.com/c/home-credit-default-risk/data
# Code source: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features
import argparse
import pickle
import time
import warnings
from contextlib import contextmanager
import numpy as np
import pandas as pd
import redis
from sklearn.model_selection import train_test_split
from credit_utils import *
from willump.evaluation.willump_executor import willump_execute
warnings.simplefilter(action='ignore', category=FutureWarning)
base_folder = "tests/test_resources/home_credit_default_risk/"
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cascades", action="store_true", help="Cascades?")
parser.add_argument("-b", "--debug", help="Debug Mode", action="store_true")
parser.add_argument("-k", "--top_k", type=int, help="Top-K to return", required=True)
parser.add_argument("-d", "--disable", help="Disable Willump", action="store_true")
parser.add_argument("-r", "--redis", help="Redis IP", type=str)
args = parser.parse_args()
if args.cascades:
cascades = pickle.load(open(base_folder + "training_cascades.pk", "rb"))
else:
cascades = None
top_K = args.top_k
if args.redis is None:
redis_ip = "127.0.0.1"
else:
redis_ip = args.redis
db = redis.StrictRedis(host=redis_ip)
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.5f}s".format(title, time.time() - t0))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows=None, nan_as_category=False):
# Read data and merge
df = pd.read_csv(base_folder + 'application_train.csv', nrows=num_rows)
print("Train samples: {}".format(len(df)))
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
# Categorical features with One-Hot encode
df, cat_cols = one_hot_encoder(df, nan_as_category)
# NaN values for DAYS_EMPLOYED: 365.243 -> nan
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
# Some simple new features (percentages)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
return df
def get_row_to_merge_features_bureau(keys):
global num_queries, num_nan
num_queries += len(keys)
pipe = db.pipeline()
for key in keys:
redis_key = str(0) + "_" + str(int(key))
pipe.get(redis_key)
serialized_result = pipe.execute()
pre_result = []
for ser_entry in serialized_result:
try:
pre_result.append(pickle.loads(ser_entry))
except TypeError:
pre_result.append(bureau_nan)
num_nan += 1
result = | pd.concat(pre_result, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
##
# @file human_neuron_equilibrium.py
# @brief Calculates the equilibrium potential and the resting potential for a typical human neuron
# @author <NAME>
# @date 22/04/2021
#
#
# @details The equilibrium potential and the resting potential are calculated for an excitable membrane at 37 Celsius
# with concentrations of potassium inside the cell of 100 mM, potassium outside the cell of 5 mM, sodium
# inside the cell of 15 mM, sodium outside the cell of 150 mM, cloride inside the cell of 13 mM and cloride
# outside the cell of 150 mM and relative permeabilities compared to the the permeability of potassium of
# sodium of 0.025 and cloride of 0.45.
from biomedical_signal_processing import ABSOLUTE_TEMPERATURE_CELSIUS as T0
from biomedical_signal_processing import nernst_equation, goldman_equation
import numpy as np
import pandas as pd
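# For reference, the imported helpers are assumed to implement the standard textbook forms:
#   Nernst:  E_X = (R*T/(z_X*F)) * ln([X]_out / [X]_in)
#   Goldman: V_m = (R*T/F) * ln((P_K*[K]_out + P_Na*[Na]_out + P_Cl*[Cl]_in)
#                               / (P_K*[K]_in + P_Na*[Na]_in + P_Cl*[Cl]_out))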
def main():
T = 37 # Celsius
K_in = 100 # mM
K_out = 5 # mM
Na_in = 15 # mM
Na_out = 150 # mM
Cl_in = 13 # mM
Cl_out = 150 # mM
z_K = 1
z_Na = 1
z_Cl = -1
K_perm = 1
Na_perm = 0.025
Cl_perm = 0.45
ions = pd.Series(['K+', 'Na+', 'Cl-'])
ions_in = pd.Series([K_in, Na_in, Cl_in], index=ions)
ions_out = pd.Series([K_out, Na_out, Cl_out], index=ions)
z_ions = | pd.Series([z_K, z_Na, z_Cl], index=ions) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 11:13:15 2019
@author: jkern
"""
from __future__ import division
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def hydro(sim_years):
    #########################################################################
    # The purpose of this script is to use synthetic streamflows at major California
    # reservoir sites to simulate daily hydropower production for the PG&E and SCE
    # zones of the California electricity market (CAISO), using parameters optimized
# via a differential evolution algorithm.
#########################################################################
# load California storage reservoir (ORCA) sites
df_sites = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name = 'ORCA',header=0)
ORCA_sites = list(df_sites)
# load upper generation amounts for each predicted hydropower dam (PG&E and SCE)
upper_gen = pd.read_excel('CA_hydropower/upper.xlsx',header =0)
# month-day calender
calender = pd.read_excel('CA_hydropower/calender.xlsx',header=0)
# load simulated full natural flows at each California storage reservoir (ORCA site)
df_sim = pd.read_csv('Synthetic_streamflows/synthetic_streamflows_CA.csv',header=0)
df_sim = df_sim.loc[0:(sim_years+3)*365,:]
# load simulated outflows calculated by ORCA
df_ORCA = | pd.read_csv('ORCA_output.csv') | pandas.read_csv |
import dash
import dash_core_components as dcc, dash_table
import dash_html_components as html
from dash.dependencies import Input, Output
from sklearn.decomposition import PCA
import plotly.express as px
import db
import pandas as pd
from labels import FEATURES, SUMMARY, SCATTER
app = dash.Dash(__name__)
df = db.main_db()
df = df.convert_dtypes()
df['datepublished'] = | pd.to_datetime(df['datepublished']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 09:55:07 2018
@author: michaelek
"""
import numpy as np
import pandas as pd
###################################
### Functions
def allo_ts_apply(row, from_date, to_date, freq, restr_col, remove_months=True):
"""
Pandas apply function that converts the allocation data to a monthly time series.
"""
crc_from_date = pd.Timestamp(row['FromDate'])
crc_to_date = | pd.Timestamp(row['ToDate']) | pandas.Timestamp |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import nltk
from nltk.stem import WordNetLemmatizer
import zipfile
import pandas as pd
import numpy as np
import pickle
from collections import Counter
import gzip
import random
import sklearn
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from nltk.metrics import *
from sklearn.pipeline import Pipeline
def save(obj, filename, protocol=pickle.DEFAULT_PROTOCOL):
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f, protocol)
def load(filename):
with gzip.open(filename, 'rb') as f:
loaded_object = pickle.load(f)
return loaded_object
booklist=load('booklist.pc')
summarylist,excerptlist,genreslist=load('sum_exc_gen.pc')
sumwds,excwds = load('sum_exc_words.pc')
#%% how many fiction books have more than one genre tag?
fic_ids=[id for id in genreslist if 'Fiction' in genreslist[id] and len(genreslist[id])>1]
# 2974
fic_list=booklist.loc[fic_ids]
#%% how many cats?
genres=set()
for id in genreslist:
for g in genreslist[id]:
genres.add(g)
genres
for g in genres:
print(g)
#%% frequency stat
def find_ids(exi=[],noexi=[]):
ids=[id for id in genreslist]
for g in exi:
ids=[id for id in ids if g in genreslist[id]]
for g in noexi:
ids=[id for id in ids if g not in genreslist[id]]
return ids
def find_counter(exi=[],noexi=[]):
    'count tag frequencies among books that have every tag in exi and none of the tags in noexi'
ids=find_ids(exi,noexi)
print(len(ids))
gens=[]
for id in ids:
gens+=genreslist[id]
return pd.DataFrame(Counter(gens).most_common())
temp=find_counter(['Fiction'])
temp=find_counter(['Nonfiction'])
temp=find_counter(["Children's Books"])
temp=find_counter(["Young Adult"])
temp=find_counter([],["Children's Books","Young Adult"])
both_id=find_ids(['Fiction','Nonfiction'])
temp=booklist.loc[both_id]
#%% imputation and correction!
# reorder the genres
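# for each book: genre0 becomes 'Fiction'/'Nonfiction' (or NaN when both or
# neither tag is present), and the remaining tags fill genre1..genre4,
# padded with NaN when a book has fewer than four extra tags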
for id in genreslist:
if 'Fiction' in genreslist[id] and 'Nonfiction' in genreslist[id]:
booklist.loc[id,'genre0']=np.nan
elif 'Fiction' in genreslist[id]:
booklist.loc[id,'genre0']='Fiction'
elif 'Nonfiction' in genreslist[id]:
booklist.loc[id,'genre0']='Nonfiction'
else:
booklist.loc[id,'genre0']=np.nan
other=genreslist[id].copy()
try:
other.remove('Fiction')
except:
pass
try:
other.remove('Nonfiction')
except:
pass
if len(other)>4: print('error!')
for i in range(1,5):
try:
booklist.loc[id,'genre'+str(i)]=other[i-1]
except:
booklist.loc[id,'genre'+str(i)]=np.nan
booklist=booklist.drop('fiction',axis=1)
# imputation
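# idea: count how often each remaining sub-genre co-occurs with 'Fiction' vs
# 'Nonfiction'; a sub-genre whose fiction rate is above 0.95 (below 0.05) is
# treated as a Fiction (Nonfiction) indicator, and those indicators are used
# further down to fill genre0 by majority vote over each book's tags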
gens=set()
for id in genreslist:
gens.update(set(genreslist[id]))
gens.remove('Fiction')
gens.remove('Nonfiction')
gens={g:[0,0] for g in gens}
for id in genreslist:
if 'Fiction' in genreslist[id]:
for g in genreslist[id]:
if g in gens:
gens[g][0]+=1
if 'Nonfiction' in genreslist[id]:
for g in genreslist[id]:
if g in gens:
gens[g][1]+=1
for g in gens:
fic,nonfic=gens[g]
ficrate=fic/(fic+nonfic)
if ficrate>0.95:
gens[g]='Fiction'
elif ficrate<0.05:
gens[g]='Nonfiction'
else:
gens[g]=np.nan
ficgens=[g for g in gens if gens[g]=='Fiction']
nficgens=[g for g in gens if gens[g]=='Nonfiction']
bothegens=[g for g in gens if gens[g] not in ['Fiction','Nonfiction']]
for id in booklist.index:
isfic=0
notfic=0
for g in genreslist[id]:
if g in ficgens:
isfic+=1
elif g in nficgens:
notfic+=1
    if isfic>notfic:
        booklist.loc[id,'genre0']='Fiction'
    elif isfic<notfic:
        booklist.loc[id,'genre0']='Nonfiction'
# add text data and save as csv
summary=pd.DataFrame({'summary_text':summarylist})
excerpt=pd.DataFrame({'excerpt_text':excerptlist})
booklistall=booklist.join(summary).join(excerpt)
booklistall.to_csv('booklistall_v2.csv')
# use the gzip-based save helper so that load('booklist.pc') can read it back
save(booklistall, 'booklist.pc')
#%% stat for chars
text_col=['author', 'title1', 'title2','summary_text', 'excerpt_text']
textlist=booklist[text_col]
txtall=''
for c in textlist:
for t in textlist[c]:
txtall+=t
temp=Counter(txtall).most_common()
known=' \nQWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm1234567890'
temp2=[i for i in temp if i[0] not in known]
temp2= | pd.DataFrame(temp2) | pandas.DataFrame |
import operator
from enum import Enum
from typing import Union, Any, Optional, Hashable
import numpy as np
import pandas as pd
import pandas_flavor as pf
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.reshape.merge import _MergeOperation
from janitor.utils import check, check_column
@pf.register_dataframe_method
def conditional_join(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
*conditions,
how: str = "inner",
sort_by_appearance: bool = False,
df_columns: Optional[Any] = None,
right_columns: Optional[Any] = None,
) -> pd.DataFrame:
"""
This is a convenience function that operates similarly to `pd.merge`,
but allows joins on inequality operators,
or a combination of equi and non-equi joins.
    Joins solely on equality are not supported.
    If the join is solely on equality, the `pd.merge` function
covers that; if you are interested in nearest joins, or rolling joins,
or the first match (lowest or highest) - `pd.merge_asof` covers that.
There is also the IntervalIndex, which is usually more efficient
for range joins, especially if the intervals do not overlap.
Column selection in `df_columns` and `right_columns` is possible using the
[`select_columns`][janitor.functions.select_columns.select_columns] syntax.
This function returns rows, if any, where values from `df` meet the
condition(s) for values from `right`. The conditions are passed in
as a variable argument of tuples, where the tuple is of
the form `(left_on, right_on, op)`; `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. For multiple conditions, the and(`&`)
operator is used to combine the results of the individual conditions.
The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`.
A binary search is used to get the relevant rows for non-equi joins;
this avoids a cartesian join, and makes the process less memory intensive.
For equi-joins, Pandas internal merge function is used.
The join is done only on the columns.
MultiIndex columns are not supported.
For non-equi joins, only numeric and date columns are supported.
Only `inner`, `left`, and `right` joins are supported.
If the columns from `df` and `right` have nothing in common,
a single index column is returned; else, a MultiIndex column
is returned.
Example:
>>> import pandas as pd
>>> import janitor
>>> df1 = pd.DataFrame({"value_1": [2, 5, 7, 1, 3, 4]})
>>> df2 = pd.DataFrame({"value_2A": [0, 3, 7, 12, 0, 2, 3, 1],
... "value_2B": [1, 5, 9, 15, 1, 4, 6, 3],
... })
>>> df1
value_1
0 2
1 5
2 7
3 1
4 3
5 4
>>> df2
value_2A value_2B
0 0 1
1 3 5
2 7 9
3 12 15
4 0 1
5 2 4
6 3 6
7 1 3
>>> df1.conditional_join(
... df2,
... ("value_1", "value_2A", ">="),
... ("value_1", "value_2B", "<=")
... )
value_1 value_2A value_2B
0 2 1 3
1 2 2 4
2 5 3 5
3 5 3 6
4 7 7 9
5 1 0 1
6 1 0 1
7 1 1 3
8 3 1 3
9 3 2 4
10 3 3 5
11 3 3 6
12 4 2 4
13 4 3 5
14 4 3 6
:param df: A pandas DataFrame.
:param right: Named Series or DataFrame to join to.
:param conditions: Variable argument of tuple(s) of the form
`(left_on, right_on, op)`, where `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. The operator can be any of
`==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions,
the and(`&`) operator is used to combine the results
of the individual conditions.
:param how: Indicates the type of join to be performed.
It can be one of `inner`, `left`, `right`.
Full join is not supported. Defaults to `inner`.
:param sort_by_appearance: Default is `False`.
This is useful for strictly non-equi joins,
where the user wants the original order maintained.
If True, values from `df` and `right`
that meet the join condition will be returned
in the final dataframe in the same order
that they were before the join.
:param df_columns: Columns to select from `df`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:param right_columns: Columns to select from `right`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:returns: A pandas DataFrame of the two merged Pandas objects.
"""
return _conditional_join_compute(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
class _JoinOperator(Enum):
"""
List of operators used in conditional_join.
"""
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
STRICTLY_EQUAL = "=="
NOT_EQUAL = "!="
class _JoinTypes(Enum):
"""
List of join types for conditional_join.
"""
INNER = "inner"
LEFT = "left"
RIGHT = "right"
operator_map = {
_JoinOperator.STRICTLY_EQUAL.value: operator.eq,
_JoinOperator.LESS_THAN.value: operator.lt,
_JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,
_JoinOperator.GREATER_THAN.value: operator.gt,
_JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,
_JoinOperator.NOT_EQUAL.value: operator.ne,
}
less_than_join_types = {
_JoinOperator.LESS_THAN.value,
_JoinOperator.LESS_THAN_OR_EQUAL.value,
}
greater_than_join_types = {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.GREATER_THAN_OR_EQUAL.value,
}
def _check_operator(op: str):
"""
Check that operator is one of
`>`, `>=`, `==`, `!=`, `<`, `<=`.
Used in `conditional_join`.
"""
sequence_of_operators = {op.value for op in _JoinOperator}
if op not in sequence_of_operators:
raise ValueError(
"The conditional join operator "
f"should be one of {sequence_of_operators}"
)
def _conditional_join_preliminary_checks(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
conditions: tuple,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> tuple:
"""
Preliminary checks for conditional_join are conducted here.
Checks include differences in number of column levels,
length of conditions, existence of columns in dataframe, etc.
"""
check("right", right, [pd.DataFrame, pd.Series])
df = df.copy()
right = right.copy()
if isinstance(right, pd.Series):
if not right.name:
raise ValueError(
"Unnamed Series are not supported for conditional_join."
)
right = right.to_frame()
if df.columns.nlevels != right.columns.nlevels:
raise ValueError(
"The number of column levels "
"from the left and right frames must match. "
"The number of column levels from the left dataframe "
f"is {df.columns.nlevels}, while the number of column levels "
f"from the right dataframe is {right.columns.nlevels}."
)
if not conditions:
raise ValueError("Kindly provide at least one join condition.")
for condition in conditions:
check("condition", condition, [tuple])
len_condition = len(condition)
if len_condition != 3:
raise ValueError(
"condition should have only three elements; "
f"{condition} however is of length {len_condition}."
)
for left_on, right_on, op in conditions:
check("left_on", left_on, [Hashable])
check("right_on", right_on, [Hashable])
check("operator", op, [str])
check_column(df, [left_on])
check_column(right, [right_on])
_check_operator(op)
if all(
(op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)
):
raise ValueError("Equality only joins are not supported.")
check("how", how, [str])
checker = {jointype.value for jointype in _JoinTypes}
if how not in checker:
raise ValueError(f"'how' should be one of {checker}.")
check("sort_by_appearance", sort_by_appearance, [bool])
if (df.columns.nlevels > 1) and (
isinstance(df_columns, dict) or isinstance(right_columns, dict)
):
raise ValueError(
"Column renaming with a dictionary is not supported "
"for MultiIndex columns."
)
return (
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
def _conditional_join_type_check(
left_column: pd.Series, right_column: pd.Series, op: str
) -> None:
"""
Raise error if column type is not any of numeric or datetime or string.
"""
permitted_types = {
is_datetime64_dtype,
is_numeric_dtype,
is_string_dtype,
is_categorical_dtype,
}
for func in permitted_types:
if func(left_column):
break
else:
raise ValueError(
"conditional_join only supports "
"string, category, numeric, or date dtypes (without timezone) - "
f"'{left_column.name} is of type {left_column.dtype}."
)
lk_is_cat = is_categorical_dtype(left_column)
rk_is_cat = is_categorical_dtype(right_column)
if lk_is_cat & rk_is_cat:
if not left_column.array._categories_match_up_to_permutation(
right_column.array
):
raise ValueError(
f"'{left_column.name}' and '{right_column.name}' "
"should have the same categories, and the same order."
)
elif not is_dtype_equal(left_column, right_column):
raise ValueError(
f"Both columns should have the same type - "
f"'{left_column.name}' has {left_column.dtype} type;"
f"'{right_column.name}' has {right_column.dtype} type."
)
if (op in less_than_join_types.union(greater_than_join_types)) & (
(is_string_dtype(left_column) | is_categorical_dtype(left_column))
):
raise ValueError(
"non-equi joins are supported "
"only for datetime and numeric dtypes. "
f"{left_column.name} in condition "
f"({left_column.name}, {right_column.name}, {op}) "
f"has a dtype {left_column.dtype}."
)
return None
def _conditional_join_compute(
df: pd.DataFrame,
right: pd.DataFrame,
conditions: list,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> pd.DataFrame:
"""
This is where the actual computation
for the conditional join takes place.
A pandas DataFrame is returned.
"""
(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
) = _conditional_join_preliminary_checks(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
eq_check = False
le_lt_check = False
for condition in conditions:
left_on, right_on, op = condition
_conditional_join_type_check(df[left_on], right[right_on], op)
if op == _JoinOperator.STRICTLY_EQUAL.value:
eq_check = True
elif op in less_than_join_types.union(greater_than_join_types):
le_lt_check = True
df.index = range(len(df))
right.index = range(len(right))
multiple_conditions = len(conditions) > 1
if not multiple_conditions:
left_on, right_on, op = conditions[0]
result = _generic_func_cond_join(
df[left_on], right[right_on], op, multiple_conditions
)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df,
right,
*result,
how,
sort_by_appearance,
df_columns,
right_columns,
)
if eq_check:
result = _multiple_conditional_join_eq(df, right, conditions)
elif le_lt_check:
result = _multiple_conditional_join_le_lt(df, right, conditions)
else:
result = _multiple_conditional_join_ne(df, right, conditions)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df, right, *result, how, sort_by_appearance, df_columns, right_columns
)
def _less_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is less than or equal to right_c.
If strict is True, then only indices
where `left_c` is less than
(but not equal to) `right_c` are returned.
A tuple of integer indexes
for left_c and right_c is returned.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="left")
# if any of the positions in `search_indices`
    # is equal to the length of `right_c`
# that means the respective position in `left_c`
# has no values from `right_c` that are less than
# or equal, and should therefore be discarded
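    # illustrative sketch (hypothetical values, not from the data):
    #   right_c = np.array([1, 3, 5]); left_c = np.array([2, 7])
    #   right_c.searchsorted(left_c, side="left") -> array([1, 3])
    #   3 == right_c.size, so the row for 7 has no match and is dropped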
len_right = right_c.size
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift to the right to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices]
rows_equal = left_c == rows_equal
# replace positions where rows are equal
# with positions from searchsorted('right')
# positions from searchsorted('right') will never
# be equal and will be the furthermost in terms of position
# example : right_c -> [2, 2, 2, 3], and we need
# positions where values are not equal for 2;
# the furthermost will be 3, and searchsorted('right')
# will return position 3.
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="right")
# now we can safely replace values
# with strictly less than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# check again if any of the values
# have become equal to length of right_c
# and get rid of them
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
right_c = [right_index[ind:len_right] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, len_right - search_indices)
return left_c, right_c
def _greater_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
multiple_conditions: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is greater than or equal to right_c.
If strict is True, then only indices
where `left_c` is greater than
(but not equal to) `right_c` are returned.
if multiple_conditions is False, a tuple of integer indexes
for left_c and right_c is returned;
else a tuple of the index for left_c, right_c, as well
as the positions of left_c in right_c is returned.
"""
# quick break, avoiding the hassle
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="right")
# if any of the positions in `search_indices`
# is equal to 0 (less than 1), it implies that
# left_c[position] is not greater than any value
# in right_c
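    # illustrative sketch (hypothetical values, not from the data):
    #   right_c = np.array([4, 6, 8]); left_c = np.array([3, 7])
    #   right_c.searchsorted(left_c, side="right") -> array([0, 2])
    #   the 0 means 3 is greater than nothing in right_c, so that row is dropped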
rows_equal = search_indices < 1
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift downwards to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices - 1]
rows_equal = left_c == rows_equal
# replace positions where rows are equal with
# searchsorted('left');
# however there can be scenarios where positions
# from searchsorted('left') would still be equal;
# in that case, we shift down by 1
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="left")
# return replacements
# `left` might result in values equal to len right_c
replacements = np.where(
replacements == right_c.size, replacements - 1, replacements
)
# now we can safely replace values
# with strictly greater than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# any value less than 1 should be discarded
# since the lowest value for binary search
# with side='right' should be 1
rows_equal = search_indices < 1
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
if multiple_conditions:
return left_index, right_index, search_indices
right_c = [right_index[:ind] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, search_indices)
return left_c, right_c
def _not_equal_indices(left_c: pd.Series, right_c: pd.Series) -> tuple:
"""
Use binary search to get indices where
`left_c` is exactly not equal to `right_c`.
It is a combination of strictly less than
and strictly greater than indices.
A tuple of integer indexes for left_c and right_c
is returned.
"""
dummy = np.array([], dtype=int)
# deal with nulls
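    # a NaN on either side never satisfies < or >, but it does count as
    # "not equal", so null rows are paired with every row on the other side
    # (the cartesian blocks built below, avoiding double-counted NaN pairs),
    # then combined with the strict less-than and greater-than index sets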
l1_nulls = dummy
r1_nulls = dummy
l2_nulls = dummy
r2_nulls = dummy
any_left_nulls = left_c.isna()
any_right_nulls = right_c.isna()
if any_left_nulls.any():
l1_nulls = left_c.index[any_left_nulls.array]
l1_nulls = l1_nulls.to_numpy(copy=False)
r1_nulls = right_c.index
# avoid NAN duplicates
if any_right_nulls.any():
r1_nulls = r1_nulls[~any_right_nulls.array]
r1_nulls = r1_nulls.to_numpy(copy=False)
nulls_count = l1_nulls.size
# blow up nulls to match length of right
l1_nulls = np.tile(l1_nulls, r1_nulls.size)
# ensure length of right matches left
if nulls_count > 1:
r1_nulls = np.repeat(r1_nulls, nulls_count)
if any_right_nulls.any():
r2_nulls = right_c.index[any_right_nulls.array]
r2_nulls = r2_nulls.to_numpy(copy=False)
l2_nulls = left_c.index
nulls_count = r2_nulls.size
# blow up nulls to match length of left
r2_nulls = np.tile(r2_nulls, l2_nulls.size)
# ensure length of left matches right
if nulls_count > 1:
l2_nulls = np.repeat(l2_nulls, nulls_count)
l1_nulls = np.concatenate([l1_nulls, l2_nulls])
r1_nulls = np.concatenate([r1_nulls, r2_nulls])
outcome = _less_than_indices(left_c, right_c, strict=True)
if outcome is None:
lt_left = dummy
lt_right = dummy
else:
lt_left, lt_right = outcome
outcome = _greater_than_indices(
left_c, right_c, strict=True, multiple_conditions=False
)
if outcome is None:
gt_left = dummy
gt_right = dummy
else:
gt_left, gt_right = outcome
left_c = np.concatenate([lt_left, gt_left, l1_nulls])
right_c = np.concatenate([lt_right, gt_right, r1_nulls])
if (not left_c.size) & (not right_c.size):
return None
return left_c, right_c
def _eq_indices(
left_c: pd.Series,
right_c: pd.Series,
) -> tuple:
"""
Use binary search to get indices where left_c
is equal to right_c.
Returns a tuple of the left_index, right_index,
lower_boundary and upper_boundary.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
lower_boundary = right_c.searchsorted(left_c, side="left")
upper_boundary = right_c.searchsorted(left_c, side="right")
keep_rows = lower_boundary < upper_boundary
if not keep_rows.any():
return None
if not keep_rows.all():
left_index = left_index[keep_rows]
lower_boundary = lower_boundary[keep_rows]
upper_boundary = upper_boundary[keep_rows]
return left_index, right_index, lower_boundary, upper_boundary
def _generic_func_cond_join(
left_c: pd.Series,
right_c: pd.Series,
op: str,
multiple_conditions: bool,
) -> tuple:
"""
Generic function to call any of the individual functions
(_less_than_indices, _greater_than_indices,
or _not_equal_indices).
"""
strict = False
if op in {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.LESS_THAN.value,
_JoinOperator.NOT_EQUAL.value,
}:
strict = True
if op in less_than_join_types:
return _less_than_indices(left_c, right_c, strict)
elif op in greater_than_join_types:
return _greater_than_indices(
left_c, right_c, strict, multiple_conditions
)
elif op == _JoinOperator.NOT_EQUAL.value:
return _not_equal_indices(left_c, right_c)
def _generate_indices(
left_index: np.ndarray, right_index: np.ndarray, conditions: list
) -> tuple:
"""
Run a for loop to get the final indices.
This iteratively goes through each condition,
builds a boolean array,
and gets indices for rows that meet the condition requirements.
`conditions` is a list of tuples, where a tuple is of the form:
`(Series from df, Series from right, operator)`.
"""
for condition in conditions:
left_c, right_c, op = condition
left_c = extract_array(left_c, extract_numpy=True)[left_index]
right_c = extract_array(right_c, extract_numpy=True)[right_index]
op = operator_map[op]
mask = op(left_c, right_c)
if not mask.any():
return None
if is_extension_array_dtype(mask):
mask = mask.to_numpy(dtype=bool, na_value=False)
if not mask.all():
left_index = left_index[mask]
right_index = right_index[mask]
return left_index, right_index
def _multiple_conditional_join_ne(
df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
"""
Get indices for multiple conditions,
where all the operators are `!=`.
Returns a tuple of (left_index, right_index)
"""
# currently, there is no optimization option here
# not equal typically combines less than
# and greater than, so a lot more rows are returned
# than just less than or greater than
# here we get indices for the first condition in conditions
# then use those indices to get the final indices,
# using _generate_indices
first, *rest = conditions
left_on, right_on, op = first
# get indices from the first condition
result = _generic_func_cond_join(
df[left_on], right[right_on], op, multiple_conditions=False
)
if result is None:
return None
rest = (
(df[left_on], right[right_on], op) for left_on, right_on, op in rest
)
return _generate_indices(*result, rest)
def _multiple_conditional_join_eq(
df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
"""
Get indices for multiple conditions,
if any of the conditions has an `==` operator.
Returns a tuple of (df_index, right_index)
"""
# TODO
# this uses the idea in the `_range_indices` function
# for less than and greater than;
# I'd like to believe there is a smarter/more efficient way of doing this
# where the filter occurs within the join, and avoids a blow-up
# the current implementation uses
# a list comprehension to find first matches
# in a bid to reduce the blow up size ...
# this applies only to integers/dates
# and only offers advantages in scenarios
# where the right is duplicated
# for one to many joins,
# or one to one or strings/category, use merge
# as it is significantly faster than a binary search
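    # rough sketch of the two paths below (driven by the checks that follow):
    #   - string/category keys, or no duplicate keys on at least one side
    #     -> plain pandas merge on the equality columns, then prune the rest
    #   - duplicated numeric/date keys on both sides
    #     -> binary search on the first equality pair, then prune the rest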
eqs = [
(left_on, right_on)
for left_on, right_on, op in conditions
if op == _JoinOperator.STRICTLY_EQUAL.value
]
left_on, right_on = zip(*eqs)
left_on = [*left_on]
right_on = [*right_on]
strings_or_category = any(
col
for col in left_on
if (is_string_dtype(df[col]) | is_categorical_dtype(df[col]))
)
if (
strings_or_category
| (not right.duplicated(subset=right_on).any(axis=None))
| (not df.duplicated(subset=left_on).any(axis=None))
):
rest = (
(df[left_on], right[right_on], op)
for left_on, right_on, op in conditions
if op != _JoinOperator.STRICTLY_EQUAL.value
)
left_index, right_index = _MergeOperation(
df,
right,
left_on=left_on,
right_on=right_on,
sort=False,
copy=False,
)._get_join_indexers()
if not left_index.size:
return None
return _generate_indices(left_index, right_index, rest)
left_on, right_on = eqs[0]
outcome = _eq_indices(df[left_on], right[right_on])
if not outcome:
return None
left_index, right_index, lower_boundary, upper_boundary = outcome
eq_check = [condition for condition in conditions if condition != eqs[0]]
rest = [
(df.loc[left_index, left_on], right.loc[right_index, right_on], op)
for left_on, right_on, op in eq_check
]
rest = [
(
extract_array(left_c, extract_numpy=True),
extract_array(right_c, extract_numpy=True),
operator_map[op],
)
for left_c, right_c, op in rest
]
def _extension_array_check(arr):
"""
Convert boolean array to numpy array
if it is an extension array.
"""
if is_extension_array_dtype(arr):
return arr.to_numpy(dtype=bool, na_value=False, copy=False)
return arr
pos = np.copy(upper_boundary)
upper = np.copy(upper_boundary)
counter = np.arange(left_index.size)
# faster within C/Rust? better implemented within Pandas itself?
# the idea here is that lower_boundary moves up by 1
# till it gets to upper_boundary;
# if we get all our matches before the end of the iteration, even better
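    # sketch of the walk (hypothetical row): if right rows 2..4 share the key,
    # lower_boundary starts at 2 and upper_boundary at 5; each pass tests the
    # remaining conditions at position lower_boundary, records the first passing
    # position in `pos` for rows that succeed, and advances lower_boundary for
    # the rest; rows where `pos` never drops below upper_boundary are discarded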
for _ in range((upper_boundary - lower_boundary).max()):
if not counter.size:
break
if (lower_boundary == upper).any():
keep_rows = lower_boundary < upper
rest = [
(left_c[keep_rows], right_c, op)
for left_c, right_c, op in rest
]
lower_boundary = lower_boundary[keep_rows]
upper = upper[keep_rows]
counter = counter[keep_rows]
keep_rows = [
op(left_c, right_c[lower_boundary]) for left_c, right_c, op in rest
]
keep_rows = [_extension_array_check(arr) for arr in keep_rows]
keep_rows = np.logical_and.reduce(keep_rows)
if not keep_rows.any():
lower_boundary += 1
continue
pos[counter[keep_rows]] = lower_boundary[keep_rows]
counter = counter[~keep_rows]
rest = [
(left_c[~keep_rows], right_c, op) for left_c, right_c, op in rest
]
upper = upper[~keep_rows]
lower_boundary = lower_boundary[~keep_rows]
lower_boundary += 1
keep_rows = pos < upper_boundary
if not keep_rows.any():
return None
if not keep_rows.all():
left_index = left_index[keep_rows]
pos = pos[keep_rows]
upper_boundary = upper_boundary[keep_rows]
repeater = upper_boundary - pos
right_index = [
right_index[start:end] for start, end in zip(pos, upper_boundary)
]
right_index = np.concatenate(right_index)
left_index = np.repeat(left_index, repeater)
eq_check = [
(df[left_on], right[right_on], op)
for left_on, right_on, op in eq_check
]
return _generate_indices(left_index, right_index, eq_check)
def _multiple_conditional_join_le_lt(
df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
"""
Get indices for multiple conditions,
where `>/>=` or `</<=` is present,
and there is no `==` operator.
Returns a tuple of (df_index, right_index)
"""
# there is an opportunity for optimization for range joins
# which is usually `lower_value < value < upper_value`
# or `lower_value < a` and `b < upper_value`
# intervalindex is not used here, as there are scenarios
# where there will be overlapping intervals;
# intervalindex does not offer an efficient way to get
# the indices for overlaps
# also, intervalindex covers only the first option
# i.e => `lower_value < value < upper_value`
# it does not extend to range joins for different columns
# i.e => `lower_value < a` and `b < upper_value`
# the option used for range joins is a simple form
# dependent on sorting and extensible to overlaps
# as well as the second option:
# i.e =>`lower_value < a` and `b < upper_value`
# range joins are also the more common types of non-equi joins
# the other joins do not have an optimisation opportunity
# as far as I know, so a blowup of all the rows
# is unavoidable.
# future PR could use numba to improve performance, although it
# still doesn't help that an optimisation path is not available
# that I am aware of
# first step is to get two conditions, if possible
# where one has a less than operator
# and the other has a greater than operator
# get the indices from that
# and then build the remaining indices,
# using _generate_indices function
# the aim of this for loop is to see if there is
    # the possibility of a range join, and if there is
# use the optimised path
le_lt = None
ge_gt = None
# keep the first match for le_lt or ge_gt
for condition in conditions:
*_, op = condition
if op in less_than_join_types:
if le_lt:
continue
le_lt = condition
elif op in greater_than_join_types:
if ge_gt:
continue
ge_gt = condition
if le_lt and ge_gt:
break
# optimised path
if le_lt and ge_gt:
rest = [
condition
for condition in conditions
if condition not in (ge_gt, le_lt)
]
if rest:
rest = (
(df[left_on], right[right_on], op)
for left_on, right_on, op in rest
)
else:
rest = None
return _range_indices(df, right, ge_gt, le_lt, rest)
# no optimised path
# blow up the rows and prune
if le_lt:
conditions = (
condition for condition in conditions if condition != le_lt
)
conditions = (
(df[left_on], right[right_on], op)
for left_on, right_on, op in conditions
)
left_on, right_on, op = le_lt
outcome = _generic_func_cond_join(
df[left_on],
right[right_on],
op,
multiple_conditions=False,
)
if outcome is None:
return None
return _generate_indices(*outcome, conditions)
# no optimised path
# blow up the rows and prune
if ge_gt:
conditions = (
condition for condition in conditions if condition != ge_gt
)
conditions = (
(df[left_on], right[right_on], op)
for left_on, right_on, op in conditions
)
left_on, right_on, op = ge_gt
outcome = _generic_func_cond_join(
df[left_on],
right[right_on],
op,
multiple_conditions=False,
)
if outcome is None:
return None
return _generate_indices(*outcome, conditions)
def _range_indices(
df: pd.DataFrame,
right: pd.DataFrame,
first: tuple,
second: tuple,
rest: tuple = None,
):
"""
Retrieve index positions for range/interval joins.
Idea inspired by article:
https://www.vertica.com/blog/what-is-a-range-join-and-why-is-it-so-fastba-p223413/
Returns a tuple of (left_index, right_index)
"""
# summary of code for range join:
# get the positions where start_left is >/>= start_right
# then within the positions,
# get the positions where end_left is </<= end_right
# this should reduce the search space
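    # worked sketch (hypothetical values): say a left value 3 is joined against
    # right values sorted as [1, 2, 4, 7] with op ">=": searchsorted marks
    # positions 0..1 (values 1 and 2) as candidates; the second condition then
    # keeps only candidates whose other column also satisfies the </<= test,
    # with a final exact mask guaranteeing correctness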
left_on, right_on, op = first
strict = False
if op == _JoinOperator.GREATER_THAN.value:
strict = True
outcome = _greater_than_indices(
df[left_on],
right[right_on],
strict,
multiple_conditions=True,
)
if outcome is None:
return None
left_index, right_index, search_indices = outcome
left_on, right_on, op = second
right_c = right.loc[right_index, right_on]
left_c = df.loc[left_index, left_on]
left_c = extract_array(left_c, extract_numpy=True)
op = operator_map[op]
pos = np.copy(search_indices)
counter = np.arange(left_index.size)
ext_arr = is_extension_array_dtype(left_c)
dupes = right_c.duplicated(keep="first")
right_c = extract_array(right_c, extract_numpy=True)
# use position, not label
uniqs_index = np.arange(right_c.size)
if dupes.any():
uniqs_index = uniqs_index[~dupes]
right_c = right_c[~dupes]
for ind in range(uniqs_index.size):
if not counter.size:
break
keep_rows = op(left_c, right_c[ind])
if ext_arr:
keep_rows = keep_rows.to_numpy(
dtype=bool, na_value=False, copy=False
)
if not keep_rows.any():
continue
# get the index positions where left_c is </<= right_c
# that minimum position combined with the equivalent position
# from search_indices becomes our search space
# for the equivalent left_c index
pos[counter[keep_rows]] = uniqs_index[ind]
counter = counter[~keep_rows]
left_c = left_c[~keep_rows]
dupes = None
uniqs_index = None
# no point searching within (a, b)
# if a == b
# since range(a, b) yields none
keep_rows = pos < search_indices
if not keep_rows.any():
return None
if not keep_rows.all():
left_index = left_index[keep_rows]
pos = pos[keep_rows]
search_indices = search_indices[keep_rows]
repeater = search_indices - pos
right_index = [
right_index[start:end] for start, end in zip(pos, search_indices)
]
# get indices and filter to get exact indices
# that meet the condition
right_index = np.concatenate(right_index)
left_index = np.repeat(left_index, repeater)
# here we search for actual positions
# where left_c is </<= right_c
# safe to index the arrays, since we are picking the positions
# which are all in the original `df` and `right`
# doing this allows some speed gains
# while still ensuring correctness
left_c = extract_array(df[left_on], extract_numpy=True)[left_index]
right_c = extract_array(right[right_on], extract_numpy=True)[right_index]
mask = op(left_c, right_c)
if ext_arr:
mask = mask.to_numpy(dtype=bool, na_value=False)
if not mask.all():
left_index = left_index[mask]
right_index = right_index[mask]
if not rest:
return left_index, right_index
return _generate_indices(left_index, right_index, rest)
def _cond_join_select_columns(columns: Any, df: pd.DataFrame):
"""
Select and/or rename columns in a DataFrame.
Returns a Pandas DataFrame.
"""
df = df.select_columns(columns)
if isinstance(columns, dict):
df.columns = [columns.get(name, name) for name in df]
return df
def _create_multiindex_column(df: pd.DataFrame, right: pd.DataFrame):
"""
Create a MultiIndex column for conditional_join.
"""
header = [np.array(["left"]).repeat(df.columns.size)]
columns = [
df.columns.get_level_values(n) for n in range(df.columns.nlevels)
]
header.extend(columns)
df.columns = pd.MultiIndex.from_arrays(header)
header = [np.array(["right"]).repeat(right.columns.size)]
columns = [
right.columns.get_level_values(n) for n in range(right.columns.nlevels)
]
header.extend(columns)
right.columns = pd.MultiIndex.from_arrays(header)
header = None
return df, right
def _create_conditional_join_empty_frame(
df: pd.DataFrame,
right: pd.DataFrame,
how: str,
df_columns: Any,
right_columns: Any,
):
"""
Create final dataframe for conditional join,
if there are no matches.
"""
if df_columns:
df = _cond_join_select_columns(df_columns, df)
if right_columns:
right = _cond_join_select_columns(right_columns, right)
if set(df.columns).intersection(right.columns):
df, right = _create_multiindex_column(df, right)
if how == _JoinTypes.INNER.value:
df = df.dtypes.to_dict()
right = right.dtypes.to_dict()
df = {**df, **right}
df = {key: pd.Series([], dtype=value) for key, value in df.items()}
return pd.DataFrame(df, copy=False)
if how == _JoinTypes.LEFT.value:
right = right.dtypes.to_dict()
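        # integer columns on the right are relaxed to float so that rows from
        # `df` without a match can hold NaN in those columns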
right = {
key: float if dtype.kind == "i" else dtype
for key, dtype in right.items()
}
right = {
key: pd.Series([], dtype=value) for key, value in right.items()
}
right = pd.DataFrame(right, copy=False)
else: # how == 'right'
df = df.dtypes.to_dict()
df = {
key: float if dtype.kind == "i" else dtype
for key, dtype in df.items()
}
df = {key: pd.Series([], dtype=value) for key, value in df.items()}
df = | pd.DataFrame(df, copy=False) | pandas.DataFrame |
#Library that allows us to read csv files
import pandas as pd
#Other Libraries needed
import numpy as np
#Allows us to remove all non letters
import re
#Allows us to remove html markup from reviews in the data
from bs4 import BeautifulSoup
#allows us to remove stopwords
from nltk.corpus import stopwords
#allows us to simplify words down to roots "Stem the words"
from nltk.stem.porter import PorterStemmer
#Allows us to create a vector for each review
from sklearn.feature_extraction.text import CountVectorizer
#the Machine learning part that predicts the sentiment of the reviews
from sklearn.ensemble import RandomForestClassifier
#reads the csv file using pandas and fills it in the data frame called train
train = | pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3) | pandas.read_csv |
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import logindetails
from bs4 import BeautifulSoup
import pandas as pd
class Web:
def __init__(self):
        ''' This will open one browser with the required website, which is Monster.com in our case.
'''
self.driver = webdriver.Chrome(executable_path="d:\\Python Practice tests\\Web scraping\\chromedriver.exe")
self.driver.get('https://www.monsterindia.com/')
time.sleep(3)
class Login_Monster(Web):
def login(self):
        '''This function will perform the login with email id and password, and will print 'Login into Monster site:- Successful.' on success;
        otherwise it will print the error, if any.
'''
try:
self.login_button = self.driver.find_element(By.XPATH, "//div[@class='nav-wrapper flex-container slide-effect']//child::ul[@class='mq-menu-list user-section flex-item']//child::ul//child::li//child::a//child::span[text()= 'Jobseeker Login']")
self.login_button.click()
time.sleep(3)
self.email = self.driver.find_element(By.XPATH, "//input[@id='signInName']")
self.email.send_keys(logindetails.login.get_email())
time.sleep(2)
self.password = self.driver.find_element(By.XPATH, "//input[@name='password']")
self.password.send_keys(logindetails.login.get_password())
self.signIn = self.driver.find_element(By.XPATH, "//input[@id='signInbtn']")
self.signIn.click()
print('Login into Monster site:- Successful.')
time.sleep(3)
except Exception as e:
print(e, ":There is an error.")
return
class Job_description(Login_Monster):
profile = 'Data Analyst'
location_1 = 'Gurgaon'
def Job_profile(self):
        ''' Job_profile searches for the required job profile and location.
        You can change them through the class attributes profile and location_1.
'''
self.search = self.driver.find_element(By.XPATH, "//input[@class= 'input search-bar home_ac']")
self.search.click()
self.search.send_keys(self.profile)
time.sleep(2)
self.location = self.driver.find_element(By.XPATH, "//input[@class= 'input location_ac']")
self.location.send_keys(self.location_1)
self.search_btn = self.driver.find_element(By.XPATH, "//input[@class='btn']")
self.search_btn.click()
return
class Data(Job_description):
def job_postings(self):
        ''' job_postings scrapes every posting and the required details for the searched job profile,
        such as job title, company, experience, salary and job description, and writes all of the data to a CSV file.
'''
self.df = | pd.DataFrame({'Job_Title':[''], 'Company':[''], 'Experience':[''], 'Salary':[''], 'Job_Description':[''], 'Post_Date':[''], 'Link':['']}) | pandas.DataFrame |