| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
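# Illustrative only: a minimal sketch of reading rows like the ones below,
# assuming the dataset were published on the Hugging Face Hub; the dataset id
# used here is a placeholder, not a real one.
from datasets import load_dataset
ds = load_dataset("your-org/pandas-api-completion", split="train")  # hypothetical id
row = ds[0]
print(row["api"])         # e.g. "pandas.util.testing.assertIsInstance"
print(row["completion"])  # the pandas call that completes the truncated prompt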
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
|
tm.assertIsInstance(joined, PeriodIndex)
|
pandas.util.testing.assertIsInstance
|
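# Illustrative only: a minimal sketch of the target call from the row above,
# assuming an older pandas release where pandas.util.testing was still public
# (it has since been removed from pandas).
import pandas.util.testing as tm
from pandas import period_range, PeriodIndex
index = period_range('1/1/2000', '1/20/2000', freq='D')
joined = index.join(index[:-5], how='inner')
tm.assertIsInstance(joined, PeriodIndex)  # raises AssertionError on a type mismatch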
"""
xarray.DataArray operations for use with Anxcor processing routines
"""
import numpy as np
import xarray as xr
import anxcor.filters as filt_ops
import anxcor.numpyfftfilter as npfilt_ops
import copy
import pandas as pd
from anxcor.abstractions import XArrayRolling, XArrayProcessor, _XArrayRead, _XArrayWrite
TAPER_DEFAULT = 0.05
RESAMPLE_DEFAULT = 10.0
UPPER_CUTOFF_FREQ = 5.0
LOWER_CUTOFF_FREQ = 0.01
MAX_TAU_DEFAULT = 100.0
FILTER_ORDER_BANDPASS = 4
SECONDS_2_NANOSECONDS = 1e9
OPERATIONS_SEPARATION_CHARACTER = '->:'
## t-norm constants
T_NORM_TYPE = 'reduce_metric'
T_NORM_ROLLING_METRIC = 'mean'
T_NORM_REDUCE_METRIC = 'max'
T_NORM_WINDOW = 10.0
T_NORM_LOWER_FREQ = 0.001
T_NORM_UPPER_FREQ = 0.05
## Whitening constants
WHITEN_REDUCE_METRIC = None
WHITEN_ROLLING_METRIC = 'mean'
WHITEN_WINDOW_RATIO = 0.01
FILTER_ORDER_WHITEN = 3
WHITEN_TYPE = 'reduce_metric'
class XArrayConverter(XArrayProcessor):
"""
converts an obspy stream into an xarray
"""
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.writer = _XArrayWrite(None)
self.reader = _XArrayRead(None)
def execute(self, stream, *args, starttime=0,**kwargs):
if stream is not None and len(stream)>0:
return self._convert_trace_2_xarray(stream,starttime)
return None
def _convert_trace_2_xarray(self, stream,starttime):
timeseries = self._get_timeseries(stream)
coordinates = self._get_coordinates(stream)
delta = self._get_delta(stream)
station_code = self._get_station_id(stream)
name = self._get_dataname(stream)
channels = self._get_channels(stream)
data = self._create_numpy_data(channels, stream)
metadata={'coords': {'time' :timeseries,
'channels':channels,
'station_id':[station_code]},
'name':name,
'geographic_coordinates':coordinates,
'delta':delta,
'starttime':starttime
}
xarray = self._create_xarray(data, metadata)
return xarray
def _create_xarray(self, data, metadata):
xarray = xr.DataArray(data, coords=[metadata['coords']['channels'],
metadata['coords']['station_id'],
metadata['coords']['time']],
dims=['channel', 'station_id', 'time'])
xarray.name = metadata['name']
xarray.attrs['delta'] = metadata['delta']
xarray.attrs['starttime'] = metadata['starttime']
xarray.attrs['operations'] = 'xconvert'
if metadata['geographic_coordinates'] is not None:
xarray.attrs['location'] = metadata['geographic_coordinates']
return xarray
def _get_channels(self,stream):
return [trace.stats.channel for trace in stream ]
def _create_numpy_data(self, channels, stream):
data = np.zeros((len(channels), 1, len(stream[0].data)))
for trace in stream:
chan = channels.index(trace.stats.channel)
data[chan, 0, :] = trace.data
return data
def _get_station_id(self, stream):
network = stream[0].stats.network
station = stream[0].stats.station
return network + '.' + station
def _get_dataname(self, stream):
name = 'default'
if hasattr(stream[0].stats,'name'):
name = stream[0].stats.name
return name
def _get_delta(self,stream):
return stream[0].stats.delta
def _get_coordinates(self,stream):
if hasattr(stream[0].stats,'coordinates'):
return stream[0].stats.coordinates
return None
def _get_timeseries(self, stream):
starttime = np.datetime64(stream[0].stats.starttime.datetime)
endtime = np.datetime64(stream[0].stats.endtime.datetime)
delta = stream[0].stats.delta
timedelta =
|
pd.Timedelta(delta, 's')
|
pandas.Timedelta
|
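# Illustrative only: a minimal sketch of the target call from the row above.
# The numeric delta value is made up; pd.Timedelta(value, 's') builds the
# sample spacing used to construct the time coordinate.
import pandas as pd
delta = 0.01  # sample spacing in seconds (assumed value)
timedelta = pd.Timedelta(delta, 's')
print(timedelta)  # 0 days 00:00:00.010000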
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
import argparse
import numpy as np
import pandas as pd
from copy import deepcopy
import neat
# Path to file containing neat prescriptors. Here we simply use a
# recent checkpoint of the population from train_prescriptor.py,
# but this is likely not the most complementary set of prescriptors.
# Many approaches can be taken to generate/collect more diverse sets.
# Note: this set can contain up to 10 prescriptors for evaluation.
from covid_xprize.examples.prescriptors.neat.utils import prepare_historical_df, CASES_COL, IP_COLS, IP_MAX_VALUES, \
add_geo_id, get_predictions, PRED_CASES_COL
PRESCRIPTORS_FILE = 'neat-checkpoint-0'
# Number of days the prescriptors look at in the past.
NB_LOOKBACK_DAYS = 14
def prescribe(start_date_str: str,
end_date_str: str,
path_to_prior_ips_file: str,
path_to_cost_file: str,
output_file_path) -> None:
start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
# Load historical data with basic preprocessing
print("Loading historical data...")
df = prepare_historical_df()
# Restrict it to dates before the start_date
df = df[df['Date'] <= start_date]
# Fill in any missing case data using predictor given ips_df.
# todo: ignore ips_df for now, and instead assume we have case
# data for all days and geos up until the start_date.
# Create historical data arrays for all geos
past_cases = {}
past_ips = {}
for geo in df['GeoID'].unique():
geo_df = df[df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
past_ips[geo] = np.array(geo_df[IP_COLS])
# Gather values for scaling network output
ip_max_values_arr = np.array([IP_MAX_VALUES[ip] for ip in IP_COLS])
# Load prescriptors
checkpoint = neat.Checkpointer.restore_checkpoint(PRESCRIPTORS_FILE)
prescriptors = checkpoint.population.values()
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'config-prescriptor')
# Load IP costs to condition prescriptions
cost_df = pd.read_csv(path_to_cost_file)
cost_df['RegionName'] = cost_df['RegionName'].fillna("")
cost_df = add_geo_id(cost_df)
geo_costs = {}
for geo in cost_df['GeoID'].unique():
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[IP_COLS])[0]
geo_costs[geo] = cost_arr
# Generate prescriptions
prescription_dfs = []
for prescription_idx, prescriptor in enumerate(prescriptors):
print("Generating prescription", prescription_idx, "...")
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(prescriptor, config)
# Set up dictionary for keeping track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in sorted(IP_MAX_VALUES.keys()):
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Generate prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(start_date, end_date):
date_str = date.strftime("%Y-%m-%d")
# Get prescription for all regions
for geo in df['GeoID'].unique():
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-NB_LOOKBACK_DAYS:] + 1)
X_ips = eval_past_ips[geo][-NB_LOOKBACK_DAYS:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = geo.split('__')
if region_name == 'nan':
region_name = np.nan
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(IP_COLS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Create dataframe from prescriptions
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = get_predictions(start_date_str, date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df['GeoID'] = pres_df['CountryName'] + '__' + pres_df['RegionName'].astype(str)
pred_df['RegionName'] = pred_df['RegionName'].fillna("")
pred_df['GeoID'] = pred_df['CountryName'] + '__' + pred_df['RegionName'].astype(str)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in df['GeoID'].unique():
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in IP_COLS]).reshape(1,-1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# It is possible that the predictor does not return values for some regions.
# To make sure we generate full prescriptions, this script continues anyway.
# Geos that are ignored in this way by the predictor will not be used in
# quantitative evaluation. A list of such geos can be found in unused_geos.txt.
if len(geo_pred) != 0:
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Add prescription df to list of all prescriptions for this submission
pres_df['PrescriptionIndex'] = prescription_idx
prescription_dfs.append(pres_df)
# Combine dfs for all prescriptions into a single df for the submission
prescription_df =
|
pd.concat(prescription_dfs)
|
pandas.concat
|
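# Illustrative only: a minimal sketch of the target call from the row above,
# stacking the per-prescriptor frames into one submission frame. The toy
# frames here are made up.
import pandas as pd
prescription_dfs = [
    pd.DataFrame({'Date': ['2020-08-01'], 'PrescriptionIndex': [0]}),
    pd.DataFrame({'Date': ['2020-08-01'], 'PrescriptionIndex': [1]}),
]
prescription_df = pd.concat(prescription_dfs)
print(len(prescription_df))  # 2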
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#grab stuff from corrected and curated
#get list of filenames
##########################
#folderlistlabels=['WashU_HCAorBoth','WashU_HCD', 'UCLA_HCAorBoth','UCLA_HCD', 'UMN_HCAorBoth','UMN_HCD', 'MGH_HCAorBoth','Harvard_HCD']
#folderlistnums= [82804729845, 82804015457,82807223120, 82805124019, 82803665867, 82805151056,82761770877, 82803734267]
#Harvard
Harv=82803734267
Harvattn=96013516511
MGH2=82761770877
MGHattn=96148925420
WashUD=82804015457
WashUDattn=96147128675
WashUA=82804729845
WashUAattn=96149947498
UMNA=82803665867
UMNAattn=96153923311
UMND=82805151056
UMNDattn=96155708581
UCLAA=82807223120
UCLAAattn=96154919803
UCLAD=82805124019
UCLADattn=96162759127
harvcleandata, harvcleanscore=curatedandcorrected(Harv,Harvattn)
mghcleandata, mghcleanscore=curatedandcorrected(MGH2,MGHattn)
washudcleandata,washudcleanscore=curatedandcorrected(WashUD,WashUDattn)
washuacleandata,washuacleanscore=curatedandcorrected(WashUA,WashUAattn)
umnacleandata,umnacleanscore=curatedandcorrected(UMNA,UMNAattn)
umndcleandata,umndcleanscore=curatedandcorrected(UMND,UMNDattn)
uclaacleandata,uclaacleanscore=curatedandcorrected(UCLAA,UCLAAattn)
ucladcleandata,ucladcleanscore=curatedandcorrected(UCLAD,UCLADattn)
###stopped here
harvcleandata.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
#box.update_file(497579203898,box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
harvcleanscore.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#box.update_file(497530866864,box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
mghcleandata.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
mghcleanscore.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#update box files by hand
washudcleandata.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washudcleanscore.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
washuacleandata.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washuacleanscore.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umnacleandata.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umnacleanscore.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umndcleandata.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umndcleanscore.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
uclaacleandata.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
uclaacleanscore.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
ucladcleandata.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
ucladcleanscore.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
harvcleandata=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleandata=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleandata=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleandata=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleandata=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleandata=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleandata=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleandata=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
allrawdataHCAorBoth=pd.concat([mghcleandata,washuacleandata,umnacleandata,uclaacleandata],axis=0)
allrawdataHCD=pd.concat([harvcleandata,washudcleandata,umndcleandata,ucladcleandata],axis=0)
harvcleanscore=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleanscore=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleanscore=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleanscore=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleanscore=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleanscore=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleanscore=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleanscore=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
allscoresHCAorBoth=pd.concat([mghcleanscore,washuacleanscore,umnacleanscore,uclaacleanscore],axis=0)
allscoresHCD=pd.concat([harvcleanscore,washudcleanscore,umndcleanscore,ucladcleanscore],axis=0)
#make csv
allrawdataHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allrawdataHCD.to_csv(box_temp+'/HCD_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allscoresHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
allscoresHCD.to_csv(box_temp+'/HCD_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
def curatedandcorrected(curatedfolderid,needsattnfolder):
harvardfiles, harvardfolders=foldercontents(curatedfolderid)
#dont grab files that need attention
harvardfolders=harvardfolders.loc[~(harvardfolders.foldername.str.contains('needs_attention'))]
harvardfiles2, harvardfolders2=folderlistcontents(harvardfolders.foldername,harvardfolders.folder_id)
harvardfiles=pd.concat([harvardfiles,harvardfiles2],axis=0,sort=True)
data4process=harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE')==True)]
scores4process=harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE')==True]
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
#trick the catcontents macro to create catable dataset, but dont actually cat until you remove the
#PINS in the corrected file from the curated file
#step1 - separate data4process/scores4process into corrected and old curated data
cdata=data4process.loc[data4process.filename.str.contains('corrected')]
cscores=scores4process.loc[scores4process.filename.str.contains('corrected')]
olddata=data4process.loc[~(data4process.filename.str.contains('corrected'))]
oldscores=scores4process.loc[~(scores4process.filename.str.contains('corrected'))]
#create catable dataset for corrected data
hdatainitcorr=catcontents(cdata,box_temp)
hscoreinitcorr=catcontents(cscores,box_temp)
#get list of ids in this corrected data #60 for Harvard
corrl=findpairs(hdatainitcorr,hscoreinitcorr) #this is the list of ids in both scored and raw corrected data
#create catable dataset for old curated data
hdatainitold=catcontents(olddata,box_temp)
hscoreinitold=catcontents(oldscores,box_temp)
#remove the data with PINS from corrected
hdatainitoldsub=hdatainitold[~(hdatainitold.PIN.isin(corrl))]
hscoreinitoldsub=hscoreinitold[~(hscoreinitold.PIN.isin(corrl))]
#now cat the two datasets together
hdatainit=pd.concat([hdatainitcorr,hdatainitoldsub],axis=0,sort=True) #these have 60 more unique pins than before...good
hscoreinit=pd.concat([hscoreinitcorr,hscoreinitoldsub],axis=0,sort=True) #these have 60 more than before...good
l=findpairs(hdatainit,hscoreinit) #this is the list of ids in both scored and raw data
#set aside those who aren't in both and those that are in dlist or slist
notbothdatalist=hdatainit[~(hdatainit.PIN.isin(l))]
notbothscorelist=hscoreinit[~(hscoreinit.PIN.isin(l))]
nbs=list(notbothscorelist.PIN.unique())
nbd=list(notbothdatalist.PIN.unique())
hdatainit2=hdatainit[hdatainit.PIN.isin(l)]
hscoreinit2=hscoreinit[hscoreinit.PIN.isin(l)]
#check that this is same as above -- it is
#hdatainit2qc=hdatainit[~(hdatainit.PIN.isin(nbs+nbd))]
#hscoreinit2qc=hscoreinit[~(hscoreinit.PIN.isin(nbs+nbd))]
#find instrument duplications that are not identical
dlist,slist=findwierdos(hdatainit2,hscoreinit2)
dslist=pd.concat([dlist,slist],axis=0)
wierdlist=list(dslist.PIN.unique())
#set aside those who are in the wierdlist
nonidenticaldupdata=hdatainit2.loc[hdatainit2.PIN.isin(wierdlist)]
nonidenticaldupscore=hscoreinit2.loc[hscoreinit2.PIN.isin(wierdlist)]
wierdd=list(dlist.PIN.unique())
wierds=list(slist.PIN.unique())
#so we have the notinboth lists and the wierdlists
#Already set aside the notinbothlists
#if we exclude any wierdlist PINs from both, this should get rid of everything that isnt one-to-one
hdatainit3=hdatainit2.loc[~(hdatainit2.PIN.isin(wierdlist))]
hscoreinit3=hscoreinit2.loc[~(hscoreinit2.PIN.isin(wierdlist))]
#both have 580 unique ids - make them into a list
l3=findpairs(hdatainit3,hscoreinit3) #this is the list of ids in both scored and raw data
dlist,slist=findwierdos(hdatainit3,hscoreinit3)
#now delete any identical duplicates and check for issues finding wierdos
if dlist.empty and slist.empty:
hdatainit3=hdatainit3.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
hscoreinit3=hscoreinit3.drop_duplicates(subset={'PIN','Inst'})
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
#export scores and data for all pins in dslist or nbs or nbd with flags
notbothdatalist.to_csv(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv')
notbothscorelist.to_csv(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv',needsattnfolder)
nonidenticaldupdata.to_csv(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv')
nonidenticaldupscore.to_csv(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv',needsattnfolder)
#last but not least...set aside ids not in REDCap, and IDs that need visit numbers
#get reds from hdatatinit3 (should be same as list from hscoreinit3)
#generate hdatainit4 and hscoreinit4 which is relieved of these ids
hdatainit4=subjectsvisits(hdatainit3)
hscoreinit4=subjectsvisits(hscoreinit3)
mv=hscoreinit4.loc[~(hscoreinit4.visit.isin(['V1','V2','V3','X1','X2','X3']))].copy()
mvs=list(mv.subject.unique()) #list of PINs without visit numbers
check=subjectpairs(hdatainit4,hscoreinit4) #this number will be fewer because V1 and V2 PINs for the same subject are only counted once
redids=box.getredcapids()
dfcheck=pd.DataFrame(check,columns=['subject'])
boxids=pd.merge(dfcheck,redids,how='left',on='subject',indicator=True)
reds=list(boxids.loc[boxids._merge=='left_only'].subject) #subjects not in redcap
boxandredcap=boxids.loc[boxids._merge=='both'].subject
#export the otherwise cleanest data ready for snapshotting as the new updated curated file -- then run this for all sites befo
#write code here - has only ids with visit numbers and one to one scores and data correspondence and no wierd duplications
#but check one last time that hdatainit5 and hscoreinit5 is super clean
hdatainit5=hdatainit4.loc[~(hdatainit4.subject.isin(mvs+reds))]
hscoreinit5=hscoreinit4.loc[~(hscoreinit4.subject.isin(mvs+reds))]
#export the lists of ids and reasons they were excluded
df=pd.DataFrame(columns=['reason','affectedIDs'])
df=df.append({'reason': 'PIN In Scores but not Data', 'affectedIDs': nbs}, ignore_index=True)
df=df.append({'reason': 'PIN In Data but not Scores', 'affectedIDs': nbd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Data', 'affectedIDs': wierdd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Scores', 'affectedIDs': wierds}, ignore_index=True)
df=df.append({'reason': 'PIN/subject in Scores and Data but missing visit', 'affectedIDs': mvs}, ignore_index=True)
df=df.append({'reason': 'subject in Scores and Data but not REDCap ', 'affectedIDs': reds}, ignore_index=True)
df.to_csv(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv',needsattnfolder)
return hdatainit5,hscoreinit5
#get subject and visit from a PIN in a dataframe
def subjectsvisits(hdatainit3):
hdatainit3['subject']=hdatainit3.PIN.str.strip().str[:10]
hdatainit3['visit']=''
hdatainit3.loc[hdatainit3.PIN.str.contains('v1',case=False),'visit']='V1'
hdatainit3.loc[hdatainit3.PIN.str.contains('v2',case=False),'visit']='V2'
hdatainit3.loc[hdatainit3.PIN.str.contains('v3',case=False),'visit']='V3'
hdatainit3.loc[hdatainit3.PIN.str.contains('x1',case=False),'visit']='X1'
hdatainit3.loc[hdatainit3.PIN.str.contains('x2',case=False),'visit']='X2'
hdatainit3.loc[hdatainit3.PIN.str.contains('x3',case=False),'visit']='X3'
return hdatainit3
#pull id visit combos that arent in both scores and data files
def findpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.PIN.unique():
if i in hdatainit.PIN.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following PINs in scores but not data:')
print(i)
for i in hdatainit.PIN.unique():
if i in hscoreinit.PIN.unique():
pass
else:
print('the following PINs in data but not scores:')
print(i)
return pinsinboth
def subjectpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.subject.unique():
if i in hdatainit.subject.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following subjects in scores but not data:')
print(i)
for i in hdatainit.subject.unique():
if i in hscoreinit.subject.unique():
pass
else:
print('the following subjects in data but not scores:')
print(i)
return pinsinboth
def findwierdos(hdatainit,hscoreinit):
#compare the two types of sort to identify which files have non-identical duplications
sort1data=hdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
sort1score=hscoreinit.drop_duplicates(subset={'PIN','Inst'})
sort2data=hdatainit.drop_duplicates(subset=set(hdatainit.columns).difference({'filename','file_id'}))
sort2score=hscoreinit.drop_duplicates(subset=set(hscoreinit.columns).difference({'filename','file_id'}))
s1d=sort1data.groupby('PIN').count()
s2d=sort2data.groupby('PIN').count()
databoth=pd.merge(s1d.reset_index()[['PIN','DeviceID']], s2d.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_data=databoth.loc[databoth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
s1s=sort1score.groupby('PIN').count()
s2s=sort2score.groupby('PIN').count()
scoreboth=pd.merge(s1s.reset_index()[['PIN','DeviceID']], s2s.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_score=scoreboth.loc[scoreboth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
return wierd_data,wierd_score
def catcontents(files,cache_space): #dataframe that has filename and file_id as columns
scoresfiles=files.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.filename:
filepath=os.path.join(cache_space,i)
filenum=scoresfiles.loc[scoresfiles.filename==i,'file_id']
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']=i
temp['file_id']=pd.Series(int(filenum.values[0]),index=temp.index)
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
print(filepath+' would not import')
temp=pd.DataFrame()
temp['filename']=pd.Series(i,index=[0])
temp['file_id']=pd.Series(int(filenum.values[0]),index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def catfromlocal(endpoint_temp,scores2cat): #dataframe that has filenames
scoresfiles=scores2cat.copy()
scoresinit=
|
pd.DataFrame()
|
pandas.DataFrame
|
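# Illustrative only: a minimal sketch of the target call from the row above.
# catcontents and catfromlocal start from an empty accumulator frame and
# concatenate per-file frames into it; the toy chunks here are made up.
import pandas as pd
scoresinit = pd.DataFrame()
for chunk in ({'PIN': ['HCA0001_V1'], 'score': [1]}, {'PIN': ['HCD0002_V1'], 'score': [2]}):
    scoresinit = pd.concat([scoresinit, pd.DataFrame(chunk)], axis=0, sort=False)
print(len(scoresinit))  # 2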
# Import required libraries
import os, re
from random import randint
import flask
from flask import Flask, render_template
import base64, io
import dash
# from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import chart_studio
import chart_studio.plotly as py
import plotly.graph_objects as go
import plotly.figure_factory as ff
from flask import Flask, send_from_directory
import pandas as pd
from numpy import *
import datetime
import itertools
from datetime import datetime as dt
layout = """
<!doctype html>
<!--suppress ALL -->
<html>
<head>
<title>Precipitable Water Model</title>
<link rel="icon" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/blob/docs/docs/assets/img/icon.png?raw=true">
<link rel="shortcut icon" type="image/png" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/blob/docs/docs/assets/img/icon.png?raw=true">
{%scripts%}
<script type='text/javascript' src='assets/js/jquery.min.js'></script>
<script type='text/javascript' src='assets/js/materialize.min.js'></script>
<script type='text/javascript' src='assets/js/sidenav.js'></script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/devicons/[email protected]/devicon.min.css">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.6.0/css/all.css">
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" >
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Source+Code+Pro&display=swap">
{%css%}
</head>
<body role='flatdoc'>
<div class="demo-layout mdl-layout mdl-js-layout mdl-layout--fixed-drawer mdl-layout--fixed-header">
<header class="demo-header mdl-layout__header mdl-color--grey-100 mdl-color-text--grey-600">
<div class="mdl-layout__header-row">
<span class="mdl-layout-title">Precipitable Water Model</span>
<div class="mdl-layout-spacer" style="padding-right: 50%;"></div>
<a href="changelog.html#v2">
<div class="chip" style="height: 67%">
<div style="display: flex">
<div style="width: 10%;">
<i class="material-icons" style="padding-top: 8px">new_releases</i>
</div>
<div style="width: 90%; padding-left: 20px; padding-top: 6px">
<b>Version 2 is available</b>
</div>
</div>
</div>
</a>
</div>
</header>
<div class="demo-drawer mdl-layout__drawer mdl-color--blue-grey-900 mdl-color-text--blue-grey-50">
<nav class="demo-navigation mdl-navigation mdl-color--blue-grey-900 sidebar">
<ul class="nav flex-column" id="nav_accordion">
<a class="mdl-navigation__link" id="dash-html" href="dash.html"><i class="material-icons" role="presentation">dashboard</i>Home</a>
<li class="nav-item has-submenu">
<a class="mdl-navigation__link nav-link" href="#">Main Project<i class="material-icons" role="presentation">expand_more</i></a>
<ul class="submenu collapse">
<a class="mdl-navigation__link" id="index-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/index.html"><i class="material-icons" role="presentation">chrome_reader_mode</i>Documentation</a>
<a class="mdl-navigation__link" id="contrib-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/contrib.html"><i class="material-icons" role="presentation">group_add</i>Contribute</a>
<a class="mdl-navigation__link" id="code-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/code.html"><i class="devicon-r-plain material-icons"></i>R Features</a>
<a class="mdl-navigation__link" id="deployment-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/deployment.html"><i class="material-icons" role="presentation">build</i>Deployment</a>
<a class="mdl-navigation__link" id="changelog-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/changelog.html"><i class="material-icons" role="presentation">new_releases</i>Changelog</a>
</ul>
</li>
<li class="nav-item has-submenu">
<a class="mdl-navigation__link nav-link" data-toggle="dropdown" href="#">Side Projects <i class="material-icons" role="presentation">expand_more</i></a>
<ul class="submenu collapse">
<a class="mdl-navigation__link" id="machine_learning-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/machine_learning.html"><i class="material-icons">memory</i>Machine Learning</a>
<a class="mdl-navigation__link" id="automation-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/automation.html"><i class="material-icons">smart_toy</i>Automation</a>
</ul>
</li>
<a class="mdl-navigation__link" id="research-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/research.html"><i class="material-icons">science</i>Research</a>
<a class="mdl-navigation__link" onclick="$('#maintainers').modal('open');"><i class="material-icons" role="presentation">face</i>The Maintainers</a>
<hr>
<a class="mdl-navigation__link" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model"><i class="material-icons"><i class="fab fa-github big-icon"></i></i> View on Github</a>
<a class="mdl-navigation__link" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/archive/master.zip"><i class="material-icons" role="presentation">cloud_download</i>Download the Repo</a>
<a class="mdl-navigation__link" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/issues"><i class="material-icons" role="presentation">bug_report</i>Bug Report</a>
</ul>
</nav>
</div>
<main class="mdl-layout__content">
<div id="modal-maintainers"></div>
<div id="modal-introduction"></div>
<div class="menubar" style="padding-right: -100%;"></div>
<div class='content'>
<a id="top"></a>
<div class="collapsible">
<div class="collapsible-header">
<h2>Data Dashboard</h2>
</div>
<div class="panel">
{%app_entry%}
</div>
</div>
</div>
<nav class="bottom-nav" style="width: 100%;">
<a class="bottom-nav__action" href="#top">
<svg class="bottom-nav__icon" viewBox="0 0 24 24">
<path d="M4 12l1.41 1.41L11 7.83V20h2V7.83l5.58 5.59L20 12l-8-8-8 8z"/>
</svg>
<span class="bottom-nav__label">Back to Top</span>
</a>
<a class="bottom-nav__action" href="https://pw-ml-dash.uc.r.appspot.com/">
<i class="bottom-nav__icon material-icons" role="presentation" style="margin-bottom: -10px; margin-top: -18px">memory</i>
<span class="bottom-nav__label">Machine Learning</span>
</a>
<a class="bottom-nav__action--current" href="https://pw-data-dash.uc.r.appspot.com/">
<i class="bottom-nav__icon material-icons" role="presentation" style="margin-bottom: -10px; margin-top: -18px">insights</i>
<span class="bottom-nav__label">Data Dashboard</span>
</a>
<a class="bottom-nav__action" href="https://pw-map-dash.uc.r.appspot.com/">
<i class="bottom-nav__icon material-icons" role="presentation" style="margin-bottom: -10px; margin-top: -18px">travel_explore</i>
<span class="bottom-nav__label">Import Config</span>
</a>
</nav>
</main>
</div>
{%config%}
<script src="https://code.getmdl.io/1.3.0/material.min.js"></script>
{%renderer%}
</body>
</html>
"""
app = dash.Dash(__name__, assets_folder='assets',
index_string=layout, external_scripts=['https://code.getmdl.io/1.3.0/material.min.js'])
server = app.server
df = pd.read_csv(
"https://raw.githubusercontent.com/physicsgoddess1972/Precipitable-Water-Model/pmat-socorro-nm/data/master_data.csv")
def parse_data(contents, filename, clear):
if clear == 0:
try:
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
except AttributeError:
df = pd.read_csv(
"https://raw.githubusercontent.com/physicsgoddess1972/Precipitable-Water-Model/pmat-socorro-nm/data/master_data.csv")
elif clear > 0:
df = pd.read_csv(
"https://raw.githubusercontent.com/physicsgoddess1972/Precipitable-Water-Model/pmat-socorro-nm/data/master_data.csv")
return df
def getIndexes(dfObj, value):
''' Get index positions of value in dataframe i.e. dfObj.'''
listOfPos = list()
# Get bool dataframe with True at positions where the given value exists
result = dfObj.isin([value])
# Get list of columns that contains the value
seriesObj = result.any()
columnNames = list(seriesObj[seriesObj == True].index)
# Iterate over list of columns and fetch the rows indexes where value exists
for col in columnNames:
rows = list(result[col][result[col] == True].index)
for row in rows:
listOfPos.append((row, col))
# Return a list of tuples indicating the positions of value in the dataframe
return listOfPos
def layout():
return html.Div(children=[
html.Div(children=[
html.Div(children=[
html.Div(children=[
dcc.Upload(
id='upload-data',
children=[html.Button("add",
className="bottom-nav__icon material-icons",
style={'display': 'block', 'width': '40px', 'height': '100%',
'background-color': '#FFF', 'border-color': '#DDD',
'border-width': '2px', 'margin-left': '14px',
'margin-right': '8px'})],
),
html.Label("Upload", style={'color': 'black', 'padding-left': '0px', 'textAlign': 'left'})
], style={'display': 'flex'}),
html.Div([
html.Button("clear",
id='clear',
className="bottom-nav__icon material-icons",
n_clicks=0,
style={'display': 'block', 'height': '100%', 'width': '40px',
'background-color': '#FFF', 'border-color': '#DDD', 'border-width': '2px',
'margin-left': '13px', 'margin-right': '10px'}
),
html.Label("Clear", style={'color': 'black', 'textAlign': 'left'})
], style={'display': 'flex'}),
],
style={'margin-right': '19em'}
),
dcc.DatePickerRange(
id='daterng',
style={'textAlign': 'right'}
),
], style={'padding-bottom': 20, 'display': 'flex'}),
dcc.Tabs([
dcc.Tab(label="Time Series", children=[
html.Div([
html.Label("Y axis: ",
style={"color": "#000",
'padding-top': 10,
'padding-left': 10}),
dcc.Dropdown(id='timedata', style={'padding-left': 10, 'width': 250}),
], style={'display': 'flex', 'margin-top': 20}),
dcc.Tabs([
dcc.Tab(label="Scatter Plot", children=[
dcc.Graph(id='scatter-time')
]),
dcc.Tab(label="Heatmaps", children=[
dcc.Graph(id='heat-time')
])
], style={'padding-top': 20})
]),
dcc.Tab(label="Analytical", children=[
html.Div([
html.Label("X axis: ",
style={"color": "#000",
'padding-top': 15,
'margin-right': 10}),
dcc.Dropdown(id='analydata1', style={'width': 250, 'margin-right': 50}),
html.Label("Y axis: ",
style={"color": "#000",
'padding-top': 15, 'margin-right': 10}),
dcc.Dropdown(id='analydata2', style={'width': 250}),
], style={'display': 'flex', 'margin-top': 10}),
dcc.Graph(id='scatter-analy')
]),
# dcc.Tab(label="Charts", children=[
# dcc.Dropdown(id='chart-data',
# options=[{'label': i, 'value': i} for i in ['Ground Temperature', 'Sky Temperature', 'Delta Temperature']],
# value="Ground Temperature"),
# dcc.Graph(id='chart')
# ])
])
])
app.layout = layout()
#
@app.callback(
[dash.dependencies.Output('timedata', 'value'),
dash.dependencies.Output('timedata', 'options'),
dash.dependencies.Output('analydata1', 'value'),
dash.dependencies.Output('analydata1', 'options'),
dash.dependencies.Output('analydata2', 'options'),
dash.dependencies.Output('analydata2', 'value')],
[dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('clear', 'n_clicks')])
def update_dropdown(data, fname, clear):
df = parse_data(data, fname, clear)
opt = df.columns[4:18]
options = [{'label': i.replace("_", " "), 'value': i} for i in opt]
value = opt[0]
return value, options, value, options, options, value
@app.callback(
[dash.dependencies.Output('daterng', 'min_date_allowed'),
dash.dependencies.Output('daterng', 'max_date_allowed'),
dash.dependencies.Output('daterng', 'start_date'),
dash.dependencies.Output('daterng', 'end_date'),
dash.dependencies.Output('daterng', 'with_portal')],
[dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('clear', 'n_clicks')]
)
def update_timerng(data, fname, clear):
df = parse_data(data, fname, clear)
thing = [dt.strptime(df.Date[0], "%m/%d/%Y"),
dt.strptime(df.Date[len(df) - 1], "%m/%d/%Y"),
dt.strptime(df.Date[0], "%m/%d/%Y").date(),
dt.strptime(df.Date[len(df) - 1], "%m/%d/%Y").date(),
True]
return thing[0], thing[1], thing[2], thing[3], thing[4]
@app.callback(
dash.dependencies.Output('scatter-time', 'figure'),
[dash.dependencies.Input('timedata', 'value'),
dash.dependencies.Input('daterng', 'start_date'),
dash.dependencies.Input('daterng', 'end_date'),
dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('clear', 'n_clicks')]
)
def time_scatter_plot(timedata, start, end, data, fname, clear):
df = parse_data(data, fname, clear)
start_date = dt.strptime(start, "%Y-%m-%d").strftime('%m/%d/%Y')
end_date = dt.strptime(end, "%Y-%m-%d").strftime('%m/%d/%Y')
s = getIndexes(df, start_date)[0][0]
e = getIndexes(df, end_date)[0][0]
hovertext = list()
for yi, xi in zip(df[timedata][s:e], df.Date[s:e]):
hovertext.append('{}: {}<br />Date: {}'.format(timedata.replace("_", " "), yi, xi))
data = [{
'x': df.Date[s:e],
'y': df[timedata][s:e],
'mode': 'markers',
'marker': {'color': '#0897FF'},
'text': hovertext,
'hoverinfo': 'text',
}]
return {'data': data,
'layout': {'xaxis': {'nticks': 5,
'tickfont': {'size': 10, 'color': 'black'},
'title': "Date"},
'yaxis': {'title': timedata.replace("_", " "),
'tickfont': {'size': 10, 'color': 'black'}},
'title': "Time Series of {}".format(timedata.replace("_", " ")),
}
}
@app.callback(
dash.dependencies.Output('heat-time', 'figure'),
[dash.dependencies.Input('timedata', 'value'),
dash.dependencies.Input('daterng', 'start_date'),
dash.dependencies.Input('daterng', 'end_date'),
dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('clear', 'n_clicks')]
)
def time_heat_map(timedata, start, end, data, fname, clear):
df = parse_data(data, fname, clear)
s = getIndexes(df, dt.strptime(start, "%Y-%m-%d").strftime('%m/%d/%Y'))[0][0]
e = getIndexes(df, dt.strptime(end, "%Y-%m-%d").strftime('%m/%d/%Y'))[0][0]
delta =
|
pd.to_datetime(df.Date[e])
|
pandas.to_datetime
|
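# Illustrative only: a minimal sketch of the target call from the row above,
# parsing the m/d/Y strings in the Date column so the two endpoints can be
# differenced. The sample dates are made up.
import pandas as pd
df = pd.DataFrame({'Date': ['01/01/2020', '01/15/2020']})
delta = pd.to_datetime(df.Date[1]) - pd.to_datetime(df.Date[0])
print(delta.days)  # 14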
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from unittest import mock
import pandas
import pandas.testing
import pytest
import google.api_core.exceptions
from google.cloud.bigquery_storage import types
from .helpers import SCALAR_COLUMNS, SCALAR_COLUMN_NAMES, SCALAR_BLOCKS
pyarrow = pytest.importorskip("pyarrow")
# This dictionary is duplicated in bigquery/google/cloud/bigquery/_pandas_helpers.py
# When modifying it be sure to update it there as well.
BQ_TO_ARROW_TYPES = {
"int64": pyarrow.int64(),
"float64": pyarrow.float64(),
"bool": pyarrow.bool_(),
"numeric": pyarrow.decimal128(38, 9),
"string": pyarrow.utf8(),
"bytes": pyarrow.binary(),
"date": pyarrow.date32(), # int32 days since epoch
"datetime": pyarrow.timestamp("us"),
"time": pyarrow.time64("us"),
"timestamp": pyarrow.timestamp("us", tz="UTC"),
}
@pytest.fixture()
def mut():
from google.cloud.bigquery_storage_v1 import reader
return reader
@pytest.fixture()
def class_under_test(mut):
return mut.ReadRowsStream
@pytest.fixture()
def mock_gapic_client():
from google.cloud.bigquery_storage_v1.services import big_query_read
return mock.create_autospec(big_query_read.BigQueryReadClient)
def _bq_to_arrow_batch_objects(bq_blocks, arrow_schema):
arrow_batches = []
for block in bq_blocks:
arrays = []
for name in arrow_schema.names:
arrays.append(
pyarrow.array(
(row[name] for row in block),
type=arrow_schema.field(name).type,
size=len(block),
)
)
arrow_batches.append(
pyarrow.RecordBatch.from_arrays(arrays, schema=arrow_schema)
)
return arrow_batches
def _bq_to_arrow_batches(bq_blocks, arrow_schema):
arrow_batches = []
first_message = True
for record_batch in _bq_to_arrow_batch_objects(bq_blocks, arrow_schema):
response = types.ReadRowsResponse()
response.arrow_record_batch.serialized_record_batch = (
record_batch.serialize().to_pybytes()
)
if first_message:
response.arrow_schema = {
"serialized_schema": arrow_schema.serialize().to_pybytes(),
}
first_message = False
arrow_batches.append(response)
return arrow_batches
def _bq_to_arrow_schema(bq_columns):
def bq_col_as_field(column):
metadata = None
if column.get("description") is not None:
metadata = {"description": column.get("description")}
name = column["name"]
type_ = BQ_TO_ARROW_TYPES[column["type"]]
mode = column.get("mode", "nullable").lower()
return pyarrow.field(name, type_, mode == "nullable", metadata)
return pyarrow.schema(bq_col_as_field(c) for c in bq_columns)
def _generate_arrow_read_session(arrow_schema):
return types.ReadSession(
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()}
)
def _pages_w_unavailable(pages):
for page in pages:
yield page
raise google.api_core.exceptions.ServiceUnavailable("test: please reconnect")
def test_pyarrow_rows_raises_import_error(
mut, class_under_test, mock_gapic_client, monkeypatch
):
monkeypatch.setattr(mut, "pyarrow", None)
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
rows = iter(reader.rows())
# Since session isn't passed in, reader doesn't know serialization type
# until you start iterating.
with pytest.raises(ImportError):
next(rows)
def test_to_arrow_no_pyarrow_raises_import_error(
mut, class_under_test, mock_gapic_client, monkeypatch
):
monkeypatch.setattr(mut, "pyarrow", None)
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
with pytest.raises(ImportError):
reader.to_arrow()
with pytest.raises(ImportError):
reader.rows().to_arrow()
with pytest.raises(ImportError):
next(reader.rows().pages).to_arrow()
def test_to_arrow_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
actual_table = reader.to_arrow()
expected_table = pyarrow.Table.from_batches(
_bq_to_arrow_batch_objects(SCALAR_BLOCKS, arrow_schema)
)
assert actual_table == expected_table
def test_to_dataframe_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
got = reader.to_dataframe()
expected = pandas.DataFrame(
list(itertools.chain.from_iterable(SCALAR_BLOCKS)), columns=SCALAR_COLUMN_NAMES
)
pandas.testing.assert_frame_equal(
got.reset_index(drop=True), # reset_index to ignore row labels
expected.reset_index(drop=True),
)
def test_rows_w_empty_stream_arrow(class_under_test, mock_gapic_client):
reader = class_under_test([], mock_gapic_client, "", 0, {})
got = reader.rows()
assert tuple(got) == ()
def test_rows_w_scalars_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(SCALAR_COLUMNS)
arrow_batches = _bq_to_arrow_batches(SCALAR_BLOCKS, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
got = tuple(
dict((key, value.as_py()) for key, value in row_dict.items())
for row_dict in reader.rows()
)
expected = tuple(itertools.chain.from_iterable(SCALAR_BLOCKS))
assert got == expected
def test_to_dataframe_w_dtypes_arrow(class_under_test, mock_gapic_client):
arrow_schema = _bq_to_arrow_schema(
[
{"name": "bigfloat", "type": "float64"},
{"name": "lilfloat", "type": "float64"},
]
)
blocks = [
[{"bigfloat": 1.25, "lilfloat": 30.5}, {"bigfloat": 2.5, "lilfloat": 21.125}],
[{"bigfloat": 3.75, "lilfloat": 11.0}],
]
arrow_batches = _bq_to_arrow_batches(blocks, arrow_schema)
reader = class_under_test(arrow_batches, mock_gapic_client, "", 0, {})
got = reader.to_dataframe(dtypes={"lilfloat": "float16"})
expected = pandas.DataFrame(
{
"bigfloat": [1.25, 2.5, 3.75],
"lilfloat":
|
pandas.Series([30.5, 21.125, 11.0], dtype="float16")
|
pandas.Series
|
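# Illustrative only: a minimal sketch of the target call from the row above.
# The expected 'lilfloat' column is built as a float16 Series so that it
# matches the dtype requested through to_dataframe(dtypes={"lilfloat": "float16"}).
import pandas
expected = pandas.DataFrame(
    {
        "bigfloat": [1.25, 2.5, 3.75],
        "lilfloat": pandas.Series([30.5, 21.125, 11.0], dtype="float16"),
    }
)
print(expected.dtypes["lilfloat"])  # float16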
# Copyright (c) 2020, MD2K Center of Excellence
# - <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import pandas as pd
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.group import GroupedData
from pyspark.sql.types import *
from pyspark.sql.types import StructType
from pyspark.sql.window import Window
from cerebralcortex.core.datatypes.datastream import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def complementary_filter(ds, freq: int = 16, accelerometer_x: str = "accelerometer_x",
accelerometer_y: str = "accelerometer_y", accelerometer_z: str = "accelerometer_z",
gyroscope_x: str = "gyroscope_x", gyroscope_y: str = "gyroscope_y",
gyroscope_z: str = "gyroscope_z"):
"""
Compute complementary filter on gyro and accel data.
Args:
ds (DataStream ): Non-Windowed/grouped dataframe
freq (int): frequency of accel/gyro. Assumption is that frequency is equal for both gyro and accel.
accelerometer_x (str): name of the column
accelerometer_y (str): name of the column
accelerometer_z (str): name of the column
gyroscope_x (str): name of the column
gyroscope_y (str): name of the column
gyroscope_z (str): name of the column
"""
dt = 1.0 / freq  # 1/16.0
M_PI = math.pi
hpf = 0.90
lpf = 0.10
window = Window.partitionBy(ds._data['user']).orderBy(ds._data['timestamp'])
data = ds._data.withColumn("thetaX_accel",
((F.atan2(-F.col(accelerometer_z), F.col(accelerometer_y)) * 180 / M_PI)) * lpf) \
.withColumn("roll",
(F.lag("thetaX_accel").over(window) + F.col(gyroscope_x) * dt) * hpf + F.col("thetaX_accel")).drop(
"thetaX_accel") \
.withColumn("thetaY_accel",
((F.atan2(-F.col(accelerometer_x), F.col(accelerometer_z)) * 180 / M_PI)) * lpf) \
.withColumn("pitch",
(F.lag("thetaY_accel").over(window) + F.col(gyroscope_y) * dt) * hpf + F.col("thetaY_accel")).drop(
"thetaY_accel") \
.withColumn("thetaZ_accel",
((F.atan2(-F.col(accelerometer_y), F.col(accelerometer_x)) * 180 / M_PI)) * lpf) \
.withColumn("yaw",
(F.lag("thetaZ_accel").over(window) + F.col(gyroscope_z) * dt) * hpf + F.col("thetaZ_accel")).drop(
"thetaZ_accel")
return DataStream(data=data.dropna(), metadata=Metadata())
def compute_zero_cross_rate(ds, exclude_col_names: list = [],
feature_names=['zero_cross_rate']):
"""
Compute the zero-crossing-rate feature for each column.
Args:
ds (DataStream ): Windowed/grouped dataframe
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features to compute. Only 'zero_cross_rate' is supported by this method.
Returns:
DataStream object
"""
exclude_col_names.extend(["timestamp", "localtime", "user", "version"])
data = ds._data.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def calculate_zero_cross_rate(series):
"""
How often the signal changes sign (+/-)
"""
series_mean = np.mean(series)
series = [v - series_mean for v in series]
zero_cross_count = (np.diff(np.sign(series)) != 0).sum()
return zero_cross_count / len(series)
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_features_udf(df):
results = []
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
df.drop(exclude_col_names, axis=1, inplace=True)
if "zero_cross_rate" in feature_names:
df_zero_cross_rate = df.apply(calculate_zero_cross_rate)
df_zero_cross_rate.index += '_zero_cross_rate'
results.append(df_zero_cross_rate)
output = pd.DataFrame(
|
pd.concat(results)
|
pandas.concat
|
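# Illustrative only: a minimal sketch of the target call from the row above.
# Inside the pandas UDF, the per-column feature Series are concatenated and
# wrapped in a DataFrame; the transpose and the toy Series names here are
# illustrative assumptions, not taken from the source.
import pandas as pd
results = [pd.Series({'accelerometer_x_zero_cross_rate': 0.12}),
           pd.Series({'accelerometer_y_zero_cross_rate': 0.08})]
output = pd.DataFrame(pd.concat(results)).T
print(list(output.columns))  # the two feature column names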
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 5 21:13:34 2016
@author: Marty
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
from unittest import mock
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from hydrofunctions import station, typing
from .fixtures import (
fakeResponse,
recent_only,
)
class TestingNWIS(station.NWIS):
"""
This subclass of NWIS is for testing all of the NWIS methods except
__init__, which we will replace. All of the other methods get inherited
verbatim, so we can test them using TestingNWIS instead of NWIS.
"""
def __init__(
self,
site=None,
service=None,
start_date=None,
end_date=None,
dataframe=None,
meta=None,
start=None,
end=None,
):
self.site = site
self.service = service
self.start_date = start_date
self.end_date = end_date
self._dataframe = dataframe
self.meta = meta
self.start = start
self.end = end
class TestStation(unittest.TestCase):
def test_station_is_obj(self):
actual = station.Station()
self.assertIsInstance(actual, station.Station)
def test_station_site_defaults_to_None(self):
actual = station.Station()
self.assertIsNone(actual.site)
def test_station_id_sets(self):
expected = "01234567"
actual = station.Station(expected)
another = station.Station("23456789")
self.assertEqual(actual.site, expected)
self.assertEqual(another.site, "23456789")
def test_station_dict_returns_dict(self):
actual = station.Station("first")
self.assertIsInstance(actual.station_dict, dict)
def test_multiple_instances_only_one_list(self):
first = station.Station("first")
second = station.Station("second")
self.assertEqual(first.station_dict, second.station_dict)
def test_station_dict_keeps_keys(self):
first = station.Station("first")
second = station.Station("second")
actual = first.station_dict
self.assertIn("first", actual)
self.assertIn("second", actual)
self.assertEqual(
len(actual),
2,
"The dict length is not equal to the \
number of instances",
)
def test_station_dict_returns_instance(self):
first = station.Station("first")
second = station.Station("second")
expected = first
# Look at the station_dict; does it contain a ref to 'first'?
actual = second.station_dict["first"]
self.assertEqual(actual, expected)
def test_station_subclasses_maintain_same_station_dict(self):
class Foo(station.Station):
pass
foo_inst = Foo("foo")
station_inst = station.Station("station")
self.assertIn("station", foo_inst.station_dict)
self.assertIn("foo", station_inst.station_dict)
actual = station_inst.station_dict["foo"]
self.assertIsInstance(actual, Foo)
class TestNWISinit(unittest.TestCase):
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
def test_NWIS_init_check_defaults(self, mock_get_nwis_property, mock_get_nwis):
default_site = None
default_service = "dv"
default_start = None
default_end = None
default_parameterCd = "all"
default_period = None
default_stateCd = None
default_countyCd = None
default_bBox = None
mock_get_nwis_property.return_value = "expected"
mock_get_nwis.return_value = fakeResponse()
station.NWIS()
mock_get_nwis.assert_called_once_with(
default_site,
default_service,
default_start,
default_end,
parameterCd=default_parameterCd,
period=default_period,
stateCd=default_stateCd,
countyCd=default_countyCd,
bBox=default_bBox,
)
self.assertTrue(mock_get_nwis_property)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
def test_NWIS_init_calls_get_nwis_and_get_prop(
self, mock_get_nwis_property, mock_get_nwis
):
site = "expected site"
service = "expected service"
start = "expected start"
end = "expected end"
parameterCd = "expected pCode"
mock_get_nwis_property.return_value = "expected"
mock_get_nwis.return_value = fakeResponse()
station.NWIS(site, service, start, end, parameterCd=parameterCd)
mock_get_nwis.assert_called_once_with(
site,
service,
start,
end,
parameterCd=parameterCd,
period=None,
stateCd=None,
countyCd=None,
bBox=None,
)
self.assertTrue(mock_get_nwis_property)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
@mock.patch("hydrofunctions.hydrofunctions.extract_nwis_df")
def test_NWIS_init_sets_url_ok_json(
self, mock_extract_nwis_df, mock_get_nwis_property, mock_get_nwis
):
expected_url = "expected url"
expected_ok = True
expected_json = "expected json"
mock_get_nwis.return_value = fakeResponse(
code=200, url=expected_url, json=expected_json
)
mock_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_extract_nwis_df.return_value = (mock_df, "expected_dict")
actual = station.NWIS()
# self.assertEqual(actual.url, expected_url, "NWIS.__init__() did not set self.url properly.")
self.assertEqual(
actual.ok, expected_ok, "NWIS.__init__() did not set self.ok properly."
)
self.assertEqual(
actual.json,
expected_json,
"NWIS.__init__() did not set self.json properly.",
)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
@mock.patch("hydrofunctions.hydrofunctions.extract_nwis_df")
def test_NWIS_init_calls_extract_nwis_df(
self, mock_extract_nwis_df, mock_get_nwis_property, mock_get_nwis
):
expected_json = "expected json"
mock_get_nwis.return_value = fakeResponse(json=expected_json)
mock_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_extract_nwis_df.return_value = (mock_df, "expected dict")
actual = station.NWIS()
mock_extract_nwis_df.assert_called_once_with(expected_json)
@mock.patch("hydrofunctions.hydrofunctions.read_parquet")
def test_NWIS_init_filename_calls_read_parquet(self, mock_read):
expected_filename = "expected_filename"
expected_meta = "expected meta"
expected_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_start = "expected start"
mock_end = "expected end"
mock_read.return_value = (expected_df, expected_meta)
actual = station.NWIS(file=expected_filename)
mock_read.assert_called_once_with(expected_filename)
assert_frame_equal(expected_df, actual._dataframe)
self.assertEqual(
expected_meta,
actual.meta,
"The metadata were not retrieved by NWIS.read().",
)
@mock.patch("hydrofunctions.hydrofunctions.read_parquet")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
# @mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
@mock.patch("hydrofunctions.hydrofunctions.extract_nwis_df")
@mock.patch("hydrofunctions.hydrofunctions.save_parquet")
def test_NWIS_init_filename_calls_read_parquet_then_get_nwis(
self, mock_save, mock_extract_nwis_df, mock_get_nwis, mock_read
):
# Mocks listed in order that they get called.
        # mock_read: pretend file doesn't exist, so raise OSError
# file exists:
# mock_read.return_value = (expected_df, expected_meta)
# file doesn't exist, raise error:
mock_read.side_effect = OSError()
# mock_get_nwis
expected_json = "expected json"
mock_get_nwis.return_value = fakeResponse(json=expected_json)
# mock_get_nwis_property
# never called
# mock_extract_nwis_df
mock_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_meta = "mock meta"
mock_extract_nwis_df.return_value = (mock_df, mock_meta)
# mock_save
expected_filename = "expected_filename"
mock_save.return_value = "expected self"
        # Create an NWIS with a filename, but the file doesn't exist,
        # so an OSError is raised.
        # get_nwis is then called, followed by extract_nwis_df and save().
actual = station.NWIS(file=expected_filename)
mock_save.assert_called_once_with(expected_filename, mock_df, mock_meta)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
def test_NWIS_init_request_most_recent_only(self, mock_get_nwis):
expected_json = recent_only
expected_url = (
"https://waterservices.usgs.gov/nwis/dv/?format=json%2C1.1&sites=01541200"
)
mock_get_nwis.return_value = fakeResponse(json=expected_json, url=expected_url)
actual = station.NWIS("01541200")
self.assertEqual(
actual.df().shape,
(2, 4),
"The dataframe should only have two rows and four columns.",
)
class TestNWISmethods(unittest.TestCase):
"""
Tests for NWIS methods
The following section is for testing all of the NWIS methods
EXCEPT the NWIS.__init__() method.
Creating an NWIS instance will always run the __init__ method, which we
usually don't want to do. It calls a bunch of functions that we test
elsewhere and it causes a bunch of side effects that we don't want. Yes,
you can mock all of the functions that __init__ calls, but even then there
    can be unwanted side effects, not to mention it can be tedious to mock so
many different things.
To test any method other than __init__, we will use the following strategy:
- create a sub-class of NWIS called TestingNWIS.
- TestingNWIS has a different __init__ method that allows you to pass
in a dataframe and any other initial parameter
    - all other methods get inherited from NWIS, so we can test them.
"""
def test_NWIS_df_returns_all_columns(self):
expected_cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=expected_cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df()
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols, expected_cols, "NWIS.df() should return all of the columns."
)
def test_NWIS_df_all_returns_all_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = cols
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("all")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('all') should return all of the columns.",
)
def test_NWIS_df_data_returns_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("data")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('data') should return all of the data columns.",
)
def test_NWIS_df_discharge_returns_discharge_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00060:00000", "USGS:01541303:00060:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("discharge")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('discharge') should return all of the discharge data columns.",
)
def test_NWIS_df_q_returns_discharge_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00060:00000", "USGS:01541303:00060:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("q")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('q') should return all of the discharge data columns.",
)
def test_NWIS_df_stage_returns_stage_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00065:00000", "USGS:01541303:00065:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("stage")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('stage') should return all of the stage data columns.",
)
def test_NWIS_df_flags_returns_qualifiers_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00065:00000_qualifiers",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df =
|
pd.DataFrame(data=data, columns=cols)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The production_cost.py module contains methods that are
    related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def prod_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Plots total system net revenue and cost normalized by the installed capacity of the area.
Total revenue is made up of reserve and energy revenues which are displayed in a stacked
        bar plot with total generation cost. Net revenue is represented by a dot.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(True, "generator_Pool_Revenue", self.Scenarios),
(True, "generator_Reserves_Revenue", self.Scenarios),
(True, "generator_Installed_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
#Check if zone has installed generation, if not skips
try:
Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
continue
Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
Total_Installed_Capacity.reset_index(drop=True, inplace=True)
Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
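                # Note: generation cost is negated here, presumably so it stacks below
                # zero against the positive revenue bars and the column sum equals net revenue.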
# Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
Pool_Revenues = Pool_Revenues.sum(axis=0)
# Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
Pool_Revenues.rename("Energy_Revenues", inplace=True)
### Might change to Net Reserve Revenue at later date
Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
Reserve_Revenues = Reserve_Revenues.sum(axis=0)
# Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
Reserve_Revenues.rename("Reserve_Revenues", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost,
Pool_Revenues, Reserve_Revenues],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
Total_Systems_Cost = Total_Systems_Cost.rename(scenario)
total_cost_chunk.append(Total_Systems_Cost)
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
# Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1e6 #Convert cost to millions
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)
#Checks if Net_Revenue contains data, if not skips zone and does not return a plot
if Net_Revenue.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.plot(Net_Revenue.index, Net_Revenue.values,
color='black', linestyle='None', marker='o',
label='Net Revenue')
            ax.set_ylabel('Total System Net Rev, Rev, & Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.
Plot only shows totals and is NOT broken down into technology or cost type
specific values.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
(False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000000 #Convert cost to millions
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
#Checks if Total_Systems_Cost_Out contains data, if not skips zone and does not return a plot
if Total_Systems_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel('Total System Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Total_Systems_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=1:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def detailed_gen_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by cost type (fuel, emission, start cost etc.)
        Creates a more detailed system cost plot.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Fuel_Cost",self.Scenarios),
(True,"generator_VO&M_Cost",self.Scenarios),
(True,"generator_Start_&_Shutdown_Cost",self.Scenarios),
(False,"generator_Emissions_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
gen_cost_out_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Fuel_Cost = self["generator_Fuel_Cost"].get(scenario)
# Check if Fuel_cost contains zone_input, skips if not
try:
Fuel_Cost = Fuel_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for: {zone_input}")
continue
Fuel_Cost = Fuel_Cost.sum(axis=0)
Fuel_Cost.rename("Fuel_Cost", inplace=True)
VOM_Cost = self["generator_VO&M_Cost"].get(scenario)
VOM_Cost = VOM_Cost.xs(zone_input,level=self.AGG_BY)
VOM_Cost[0].values[VOM_Cost[0].values < 0] = 0
VOM_Cost = VOM_Cost.sum(axis=0)
VOM_Cost.rename("VO&M_Cost", inplace=True)
Start_Shutdown_Cost = self["generator_Start_&_Shutdown_Cost"].get(scenario)
Start_Shutdown_Cost = Start_Shutdown_Cost.xs(zone_input,level=self.AGG_BY)
Start_Shutdown_Cost = Start_Shutdown_Cost.sum(axis=0)
Start_Shutdown_Cost.rename("Start_&_Shutdown_Cost", inplace=True)
Emissions_Cost = self["generator_Emissions_Cost"][scenario]
if Emissions_Cost.empty:
self.logger.warning(f"generator_Emissions_Cost not included in {scenario} results, Emissions_Cost will not be included in plot")
Emissions_Cost = self["generator_Start_&_Shutdown_Cost"][scenario].copy()
Emissions_Cost.iloc[:,0] = 0
Emissions_Cost = Emissions_Cost.xs(zone_input,level=self.AGG_BY)
Emissions_Cost = Emissions_Cost.sum(axis=0)
Emissions_Cost.rename("Emissions_Cost", inplace=True)
Detailed_Gen_Cost = pd.concat([Fuel_Cost, VOM_Cost, Start_Shutdown_Cost, Emissions_Cost], axis=1, sort=False)
Detailed_Gen_Cost.columns = Detailed_Gen_Cost.columns.str.replace('_',' ')
Detailed_Gen_Cost = Detailed_Gen_Cost.sum(axis=0)
Detailed_Gen_Cost = Detailed_Gen_Cost.rename(scenario)
gen_cost_out_chunks.append(Detailed_Gen_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Detailed_Gen_Cost_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False)
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.T/1000000 #Convert cost to millions
Detailed_Gen_Cost_Out.index = Detailed_Gen_Cost_Out.index.str.replace('_',' ')
# Deletes columns that are all 0
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.loc[:, (Detailed_Gen_Cost_Out != 0).any(axis=0)]
# Checks if Detailed_Gen_Cost_Out contains data, if not skips zone and does not return a plot
if Detailed_Gen_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
                Detailed_Gen_Cost_Out = self.insert_custom_data_columns(
                    Detailed_Gen_Cost_Out,
                    custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Detailed_Gen_Cost_Out.add_suffix(" (Million $)")
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Detailed_Gen_Cost_Out.index
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
mplt.barplot(Detailed_Gen_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.axhline(y=0)
ax.set_ylabel('Total Generation Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Detailed_Gen_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=2:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost_type(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by generator technology type.
Another way to represent total generation cost, this time by tech type,
i.e Coal, Gas, Hydro etc.
        Each scenario is plotted as a separate bar.
Args:
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
        # Create dictionary to hold DataFrames for each scenario
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
gen_cost_out_chunks = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Gen_Stack = self["generator_Total_Generation_Cost"].get(scenario)
# Check if Total_Gen_Stack contains zone_input, skips if not
try:
Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
Total_Gen_Stack = Total_Gen_Stack.sum(axis=0)
Total_Gen_Stack.rename(scenario, inplace=True)
gen_cost_out_chunks.append(Total_Gen_Stack)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Generation_Stack_Out =
|
pd.concat(gen_cost_out_chunks, axis=1, sort=False)
|
pandas.concat
|
import datetime
import klcalculator
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from statistics import mean, median
# takes a pandas dataframe
def get_combined_feature_risks(data):
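    # Descriptive note: for every 2-column combination of features, the values in those
    # columns are concatenated row-wise into a single string column (combined_one_cols),
    # presumably so the joint risk of each combination can be scored downstream.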
features = list(data.columns.values)
combined_risks = []
combined_one_cols = []
for i in range(2, 3): # change this as required
combs = list(itertools.combinations(features, i))
for comb in combs:
cols = data[list(comb)].values.tolist()
one_col = []
for row in cols:
line = ''
for item in row:
line = line + str(item)
one_col.append(line)
combined_one_cols.append(one_col)
frame =
|
pd.DataFrame(combined_one_cols, index=combs)
|
pandas.DataFrame
|
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..screenip_exe import Screenip
test = {}
class TestScreenip(unittest.TestCase):
"""
Unit tests for screenip.
"""
print("screenip unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for screenip unittest.
:return:
"""
pass
# screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)
# setup the test as needed
# e.g. pandas to open screenip qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for screenip unittest.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_screenip_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty screenip object
screenip_empty = Screenip(df_empty, df_empty)
return screenip_empty
def test_screenip_unit_fw_bird(self):
"""
unittest for function screenip.fw_bird:
:return:
"""
expected_results = pd.Series([0.0162, 0.0162, 0.0162], dtype='float')
result = pd.Series([], dtype='float')
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
try:
# for i in range(0,3):
# result[i] = screenip_empty.fw_bird()
screenip_empty.no_of_runs = len(expected_results)
screenip_empty.fw_bird()
result = screenip_empty.out_fw_bird
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fw_mamm(self):
"""
unittest for function screenip.fw_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.172, 0.172, 0.172], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.fw_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_bird(self):
"""
unittest for function screenip.dose_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000000., 4805.50175, 849727.21122], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_bird * self.solubility)/(self.bodyweight_assessed_bird / 1000.)
screenip_empty.out_fw_bird = pd.Series([10., 0.329, 1.8349], dtype='float')
screenip_empty.solubility = pd.Series([100., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([1.0, 2.395, 0.98], dtype='float')
result = screenip_empty.dose_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_mamm(self):
"""
unittest for function screenip.dose_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([8000000., 48205.7595, 3808036.37889], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_mamm * self.solubility)/(self.bodyweight_assessed_mammal / 1000)
screenip_empty.out_fw_mamm = pd.Series([20., 12.843, 6.998], dtype='float')
screenip_empty.solubility = pd.Series([400., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([1., 9.32, 0.834], dtype='float')
result = screenip_empty.dose_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_bird(self):
"""
unittest for function screenip.at_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000., 687.9231, 109.3361], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_avian_water) * ((self.bodyweight_assessed_bird / self.bodyweight_tested_bird)**(self.mineau_scaling_factor - 1.))
screenip_empty.ld50_avian_water = pd.Series([2000., 938.34, 345.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([100., 39.49, 183.54], dtype='float')
screenip_empty.ld50_bodyweight_tested_bird = pd.Series([200., 73.473, 395.485], dtype='float')
screenip_empty.mineau_scaling_factor = pd.Series([2., 1.5, 2.5], dtype='float')
result = screenip_empty.at_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_mamm(self):
"""
unittest for function screenip.at_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([11.89207, 214.0572, 412.6864], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.ld50_mammal_water = pd.Series([10., 250., 500.], dtype='float')
screenip_empty.ld50_bodyweight_tested_mammal = pd.Series([200., 39.49, 183.54], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([100., 73.473, 395.485], dtype='float')
result = screenip_empty.at_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fi_bird(self):
"""
unittest for function screenip.fi_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.012999, 0.026578, 0.020412], dtype='float')
result = pd.Series([], dtype='float')
try:
#0.0582 * ((bw_grams / 1000.)**0.651)
bw_grams = pd.Series([100., 300., 200.], dtype='float')
result = screenip_empty.fi_bird(bw_grams)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_act(self):
"""
unittest for function screenip.test_act:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10.5737, 124.8032, 416.4873], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.noael_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.noael_mammal_water = pd.Series([10., 120., 400.], dtype='float')
screenip_empty.noael_bodyweight_tested_mammal = pd.Series([500., 385.45, 673.854], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([400., 329.45, 573.322], dtype='float')
result = screenip_empty.act()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_det(self):
"""
        unittest for function screenip.det:
        :return:
"""
#
# '''
# Dose Equiv. Toxicity:
#
# The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by
# the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):
#
# Dose Equiv. Toxicity = (NOAEC * FI) / BW
#
# NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,
# and for any other test species. The model calculates the dose equivalent toxicity values for
# all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose
# equivalent toxicity value to represent the chronic toxicity of the chemical to birds.
# '''
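        # Worked example (hypothetical numbers, for illustration only):
        #   NOAEC = 100 mg/kg-diet, FI = 0.1 kg-diet, BW = 1.0 kg
        #   Dose Equiv. Toxicity = (100 * 0.1) / 1.0 = 10 mg/kg-bw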
# try:
# # result =
# # self.assertEquals(result, )
# pass
# finally:
# pass
# return
#
#
# def test_det_duck(self):
# """
# unittest for function screenip.det_duck:
# :return:
# """
# try:
# # det_duck = (self.noaec_duck * self.fi_bird(1580.)) / (1580. / 1000.)
# screenip_empty.noaec_duck = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_duck()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_quail(self):
# """
# unittest for function screenip.det_quail:
# :return:
# """
# try:
# # det_quail = (self.noaec_quail * self.fi_bird(178.)) / (178. / 1000.)
# screenip_empty.noaec_quail = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_quail()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_other_1(self):
# """
# unittest for function screenip.det_other_1:
# :return:
# """
# try:
# #det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# #det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# screenip_empty.noaec_bird_other_1 = pd.Series([400.]) # mg/kg-diet
# screenip_empty.bodyweight_bird_other_1 = pd.Series([100]) # grams
# result = screenip_empty.det_other_1()
# npt.assert_array_almost_equal(result, 4666, 4)
# finally:
# pass
# return
#
# The following tests are configured such that:
# 1. four values are provided for each needed input
# 2. the four input values generate four values of out_det_* per bird type
# 3. the inputs per bird type are set so that calculations of out_det_* will result in
# each bird type having one minimum among the bird types;
# thus all four calculations result in one minimum per bird type
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([4.2174, 4.96125, 7.97237, 10.664648], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.bodyweight_bobwhite_quail = 178.
screenip_empty.bodyweight_mallard_duck = 1580.
screenip_empty.noaec_quail = pd.Series([100., 300., 75., 150.], dtype='float')
screenip_empty.noaec_duck = pd.Series([400., 100., 200., 350.], dtype='float')
screenip_empty.noaec_bird_other_1 = pd.Series([50., 200., 300., 250.], dtype='float')
screenip_empty.noaec_bird_other_2 = pd.Series([350., 400., 250., 100.], dtype='float')
screenip_empty.noaec_bodyweight_bird_other_1 = pd.Series([345.34, 453.54, 649.29, 294.56], dtype='float')
screenip_empty.noaec_bodyweight_bird_other_2 = pd.Series([123.84, 85.743, 127.884, 176.34], dtype='float')
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.det()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_acute_bird(self):
"""
unittest for function screenip.acute_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10., 5.22093, 0.479639], dtype='float')
result = pd.Series([], dtype='float')
try:
# self.out_acute_bird = self.out_dose_bird / self.out_at_bird
screenip_empty.out_dose_bird = pd.Series([100., 121.23, 43.994], dtype='float')
screenip_empty.out_at_bird = pd.Series([10., 23.22, 91.723], dtype='float')
result = screenip_empty.acute_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_acuconb(self):
"""
unittest for function screenip.acuconb:
Message stating whether or not a risk is present
:return:
"""
# if self.out_acuconb == -1:
# if self.out_acute_bird == None:
# raise ValueError\
# ('acute_bird variable equals None and therefor this function cannot be run.')
# if self.out_acute_bird < 0.1:
# self.out_acuconb = ('Drinking water exposure alone is NOT a potential concern for birds')
# else:
# self.out_acuconb = ('Exposure through drinking water alone is a potential concern for birds')
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series(["Exposure through drinking water alone is a potential concern "
"for birds", "Drinking water exposure alone is NOT a potential "
"concern for birds", "Exposure through drinking water alone is a "
"potential concern for birds"], dtype='object')
result = pd.Series([], dtype='object')
try:
screenip_empty.out_acute_bird = pd.Series([0.2, 0.09, 0.1], dtype='float')
result = screenip_empty.acuconb()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_acute_mamm(self):
"""
unittest for function screenip.acute_mamm:
:return:
"""
# self.out_acute_mamm = self.out_dose_mamm / self.out_at_mamm
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10., 14.68657, 2.124852], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.out_dose_mamm = pd.Series([100., 34.44, 159.349], dtype='float')
screenip_empty.out_at_mamm = pd.Series([10., 2.345, 74.993], dtype='float')
result = screenip_empty.acute_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_acuconm(self):
"""
unittest for function screenip.acuconm:
Message stating whether or not a risk is present
:return:
"""
# if self.out_acuconm == -1:
# if self.out_acute_mamm == None:
# raise ValueError\
# ('acute_mamm variable equals None and therefor this function cannot be run.')
# if self.out_acute_mamm < 0.1:
# self.out_acuconm = ('Drinking water exposure alone is NOT a potential concern for mammals')
# else:
# self.out_acuconm = ('Exposure through drinking water alone is a potential concern for mammals')
# return self.out_acuconm
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series(["Drinking water exposure alone is NOT a potential concern "
"for mammals", "Exposure through drinking water alone is a "
"potential concern for mammals", "Drinking water exposure "
"alone is NOT a potential concern for mammals"], dtype='object')
result = pd.Series([], dtype='object')
try:
screenip_empty.out_acute_mamm = pd.Series([0.09, 0.2, 0.002], dtype='float')
result = screenip_empty.acuconm()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_chron_bird(self):
"""
unittest for function screenip.chron_bird:
:return:
"""
#self.out_chron_bird = self.out_dose_bird / self.out_det
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.5, 0.10891, 2.39857], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.out_dose_bird = pd.Series([5., 1.32, 19.191], dtype='float')
screenip_empty.out_det = pd.Series([10., 12.12, 8.001], dtype='float')
result = screenip_empty.chron_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_chronconb(self):
"""
unittest for function screenip.chronconb:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series(["Drinking water exposure alone is NOT "
"a potential concern for birds", "Exposure through "
"drinking water alone is a potential concern for "
"birds", "Drinking water exposure alone is NOT a "
"potential concern for birds"], dtype='object')
result =
|
pd.Series([], dtype='object')
|
pandas.Series
|
from collections import defaultdict, Counter
import math
import pandas as pd
import numpy as np
import sys
import os
import random
from datetime import datetime
class DAFM_data:
def __init__(self, args, complete_data, user_train, user_test, df_user_responses=None, path="", section="no", use_batches=False):
self.range_batch()
self.bsize = 16
self.use_batches = use_batches
self.complete_data = complete_data
self.path = path
self.args = args
self.d_student = []
if not (self.args.theta[0]=="False"):
student = sorted(list(set(self.complete_data["user_id"].map(str))))
self.d_student = {j:i for i, j in enumerate(student)}
problem_ids = sorted(list(set(self.complete_data["problem_id"])))
d_problem = {j:i for i,j in enumerate(problem_ids)}
self.d_problem = d_problem
self.steps = len(d_problem)
g = list(self.complete_data.groupby(["user_id"]))
responses = []
for i in range(len(g)):
responses.append(len(g[i][1]))
self.max_responses = max(responses)
if section == "concat":
sections = []
for skills, section in zip(self.complete_data["skill_name"], self.complete_data["section"]):
if "~~" in skills:
temp = ""
for skill in str(skills).split("~~"):
temp += skill + "@@" + section + "~~"
temp = temp[:-2]
sections.append(temp)
else:
sections.append(skills+"__"+section)
self.complete_data["old_skill_name"] = self.complete_data["skill_name"]
self.complete_data["skill_name"] = sections
d_section = []
elif section == "onehot":
sections = sorted(list(set(self.complete_data["section"].map(str))))
d_section = {j:i for i, j in enumerate(sections)}
else:
d_section = []
self.d_section = d_section
self.section_count = len(self.d_section)
total_skills = []
for skills in list(self.complete_data["skill_name"]):
for skill in str(skills).split("~~"):
total_skills.append(skill)
total_skills = sorted(list(set(total_skills)))
d_skill = {j:i for i, j in enumerate(total_skills)}
self.d_skill = d_skill
self.skills = len(total_skills)
Q_jk_initialize = np.zeros((len(d_problem), len(total_skills)),dtype=np.float32 )
for problem, skills in zip(self.complete_data["problem_id"], self.complete_data["skill_name"]):
for skill in str(skills).split("~~"):
Q_jk_initialize[d_problem[problem], d_skill[skill]] = 1
self.Q_jk_initialize = Q_jk_initialize
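        # Illustrative note: Q_jk_initialize is a binary problem-by-skill Q-matrix;
        # e.g. a problem tagged "add~~sub" (hypothetical skill names) gets a 1 in both
        # the "add" and "sub" columns of its row and 0 elsewhere.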
users = set(list(self.complete_data["user_id"]))
self.user_train = list(users.intersection(set(user_train)))
self.user_test = list(users.intersection(set(user_test)))
self.df_user_responses = df_user_responses
if (not self.args.item_wise[0]=="False") and (args.puser[0]=="sub"):
self.complete_data = complete_data[complete_data["user_id"].isin(self.user_train+self.user_test)]
self.complete_data.index = list(range(len(self.complete_data)))
total_datapoints = len(self.complete_data)
items = list(range(total_datapoints))
self.response_path = self.path + "/log/Responses/"
if not os.path.exists(self.response_path):
print ("Creating Response Data")
os.makedirs(self.response_path)
random.shuffle(items)
self.training_items = items[:int(0.8*total_datapoints)]
self.testing_items = items[int(0.8*total_datapoints):]
pd.Series(self.training_items).to_csv(self.response_path+"train.csv", sep=",", header=None, index=False)
|
pd.Series(self.testing_items)
|
pandas.Series
|
import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import pandas as pd
from tqdm import tqdm
import os
matplotlib.use('Agg')
@attr.s
class ConditionNumberResult(object):
form = attr.ib()
assembled_form = attr.ib()
condition_number = attr.ib()
sparse_operator = attr.ib()
number_of_dofs = attr.ib()
nnz = attr.ib()
is_operator_symmetric = attr.ib()
bcs = attr.ib(default=list())
def plot_matrix(assembled_form, **kwargs):
"""Provides a plot of a matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def plot_matrix_mixed(assembled_form, **kwargs):
"""Provides a plot of a mixed matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
f1_size = assembled_form.M[1, 1].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs):
"""Provides a plot of a condensed hybrid-mixed matrix for single scale problems."""
fig, ax = plt.subplots(1, 1)
_A = Tensor(a_form)
A = _A.blocks
idx = trace_index
S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Below there is the spy alternative
# plot = plt.spy(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
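# Hedged usage sketch (not part of the original module): a minimal helper showing how the
# plotting routine above might be driven from a ConditionNumberResult such as the ones
# returned by the solvers below. The output file name is an illustrative assumption.
def _example_plot_mixed_matrix(result, filename="mixed_matrix.png"):
    plot = plot_matrix_mixed(result.assembled_form, cmap="viridis")
    plt.savefig(filename)
    return plot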
def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray:
"""Utility function to filter real part in a numpy array.
:param array:
Array with real and complex numbers.
:param imag_threshold:
Threshold to cut off imaginary part in complex number.
:return:
Filtered array with only real numbers.
"""
real_part_array = array.real[abs(array.imag) < 1e-5]
return real_part_array
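# Hedged usage sketch (not part of the original module): keeps the entries whose imaginary
# part is below the default threshold and returns their real parts.
def _example_filter_real_part():
    values = np.array([1.0 + 0.0j, 2.0 + 1e-8j, 3.0 + 0.5j])
    return filter_real_part_in_array(values)  # expected: array([1., 2.])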
def calculate_condition_number(
A,
num_of_factors,
backend: str = "scipy",
use_sparse: bool = False,
zero_tol: float = 1e-5
):
backend = backend.lower()
if backend == "scipy":
size = A.getSize()
Mnp = csr_matrix(A.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
if use_sparse:
singular_values = svds(
A=Mnp,
k=num_of_factors,
which="LM",
maxiter=5000,
return_singular_vectors=False,
solver="lobpcg"
)
else:
M = Mnp.toarray()
singular_values = svd(M, compute_uv=False, check_finite=False)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
elif backend == "slepc":
S = SLEPc.SVD()
S.create()
S.setOperator(A)
S.setType(SLEPc.SVD.Type.LAPACK)
S.setDimensions(nsv=num_of_factors)
S.setTolerances(max_it=5000)
S.setWhichSingularTriplets(SLEPc.SVD.Which.LARGEST)
S.solve()
num_converged_values = S.getConverged()
singular_values_list = list()
if num_converged_values > 0:
for i in range(num_converged_values):
singular_value = S.getValue(i)
singular_values_list.append(singular_value)
else:
raise RuntimeError("SLEPc SVD has not converged.")
singular_values = np.array(singular_values_list)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
else:
raise NotImplementedError("The required method for condition number estimation is currently unavailable.")
return condition_number
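# Hedged usage sketch (not part of the original module): condition number of a small
# diagonal PETSc matrix via the SciPy backend; petsc4py is assumed to be importable here.
def _example_condition_number_scipy():
    from petsc4py import PETSc
    demo = PETSc.Mat().createAIJ([3, 3])
    demo.setUp()
    for i, value in enumerate([1.0, 10.0, 100.0]):
        demo.setValue(i, i, value)
    demo.assemble()
    # The diagonal entries are the singular values, so the expected result is 100.
    return calculate_condition_number(demo, num_of_factors=2)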
def solve_poisson_cg(mesh, degree=1, use_quads=False):
# Function space declaration
V = FunctionSpace(mesh, "CG", degree)
# Trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
# Dirichlet BCs
bcs = DirichletBC(V, 0.0, "on_boundary")
# Variational form
a = inner(grad(u), grad(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
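# Hedged usage sketch (not part of the original module): how the solver above might be
# called on a small quadrilateral mesh to inspect the resulting ConditionNumberResult.
def _example_solve_poisson_cg(n=8, degree=2):
    mesh = UnitSquareMesh(n, n, quadrilateral=True)
    cg_result = solve_poisson_cg(mesh, degree=degree)
    return cg_result.condition_number, cg_result.number_of_dofs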
def solve_poisson_ls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Stabilization parameters
delta_1 = Constant(1)
delta_2 = Constant(1)
delta_3 = Constant(1)
# Least-squares terms
a = delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_cgls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# Stabilizing terms
a += -0.5 * inner((u + grad(p)), v + grad(q)) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
a += 0.5 * div(u) * div(v) * dx
a += 0.5 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_vms(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# Stabilizing terms
a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_mixed_RT(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
if use_quads:
hdiv_family = 'RTCF'
pressure_family = 'DQ'
else:
hdiv_family = 'RT'
pressure_family = 'DG'
U = FunctionSpace(mesh, hdiv_family, degree + 1)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dgls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# eta_u = 1
# Nitsche's penalizing term
beta_0 = Constant(1.0)
beta = beta_0 / h
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent terms
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += -0.5 * inner(u + grad(p), v + grad(q)) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# ** Badia-Codina based
a += -eta_u * inner(u + grad(p), v + grad(q)) * dx
a += eta_p * div(u) * div(v) * dx
a += eta_p * inner(curl(u), curl(v)) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
    a += beta * p * q * ds # may decrease convergence rates
    # ** The terms below are based on ASGS Badia-Codina (2010); they are not a classical Nitsche method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
a += (eta_u / h) * dot(p * n, q * n) * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dvms(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent (original)
# a += jump(u, n) * jump(v, n) * dS # not considered in the original paper
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
# ** Badia-Codina based
a += eta_u * inner(u + grad(p), grad(q) - v) * dx
a += eta_p * div(u) * div(v) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
    # ** The terms below are based on ASGS Badia-Codina (2010); they are not a classical Nitsche method
    a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
    a += (eta_u / h) * dot(p * n, q * n) * ds # may decrease convergence rates
    # ** Classical Nitsche
    # a += beta * p * q * ds # may decrease convergence rates (Nitsche)
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sipg(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
V = FunctionSpace(mesh, pressure_family, degree)
# Trial and test functions
p = TrialFunction(V)
q = TestFunction(V)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Edge stabilizing parameter
beta0 = Constant(1e1)
beta = beta0 / h
# Symmetry term. Choose if the method is SIPG (-1) or NIPG (1)
s = Constant(-1)
# Classical volumetric terms
a = inner(grad(p), grad(q)) * dx
L = f * q * dx
# DG edge terms
a += s * dot(jump(p, n), avg(grad(q))) * dS - dot(avg(grad(p)), jump(q, n)) * dS
# Edge stabilizing terms
a += beta("+") * dot(jump(p, n), jump(q, n)) * dS
# Weak boundary conditions
a += s * dot(p * n, grad(q)) * ds - dot(grad(p), q * n) * ds
a += beta * p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
# L0 = 1
# eta_p = L0 * h_avg # method B in the Badia-Codina paper
eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
# eta_u = h_avg / L0 # method B in the Badia-Codina paper
eta_u = 1
# eta_u_bc = h / L0 # method B in the Badia-Codina paper
eta_u_bc = 1
# Least-Squares weights
delta = Constant(1.0)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = 1 / h
delta_4 = 1 / h
# Least-squares terms
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
a += delta_1 * div(u) * div(v) * dx
a += delta_2 * inner(curl(u), curl(v)) * dx
# Edge stabilizing terms
# ** Badia-Codina based (better results) **
a += eta_u * avg(delta_3) * (jump(u, n) * jump(v, n)) * dS
a += eta_p * avg(delta_4) * dot(jump(p, n), jump(q, n)) * dS
    a += eta_u_bc * delta_3 * p * q * ds # may decrease convergence rates
a += eta_u_bc * delta_4 * dot(u, n) * dot(v, n) * ds
# ** Mesh independent **
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# a += p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-12)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sdhm(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# BCs
u_projected = sigma_e
p_boundaries = p_exact
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e-18)
# beta = beta_0 / h
beta = beta_0
# Stabilization parameters
delta_0 = Constant(-1)
delta_1 = Constant(-0.5) * h * h
delta_2 = Constant(0.5) * h * h
delta_3 = Constant(0.5) * h * h
# Mixed classical terms
a = (dot(u, v) - div(v) * p + delta_0 * q * div(u)) * dx
L = delta_0 * f * q * dx
# Stabilizing terms
a += delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
L += delta_2 * f * div(v) * dx
# Hybridization terms
a += lambda_h("+") * dot(v, n)("+") * dS + mu_h("+") * dot(u, n)("+") * dS
a += beta("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
# Weakly imposed BC
a += (p_boundaries * dot(v, n) + mu_h * (dot(u, n) - dot(u_projected, n))) * ds
a += beta * (lambda_h - p_boundaries) * mu_h * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def solve_poisson_hdg(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = (dot(u, v) - div(v) * p) * dx + lambda_h("+") * jump(v, n) * dS
a += -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += lambda_h * dot(v, n) * ds
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_cgh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u = -grad(p)
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_ldgc(
mesh,
degree=1,
is_multiplier_continuous=True
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
primal_family = "DQ" if use_quads else "DG"
V = FunctionSpace(mesh, primal_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
p_boundaries = Constant(0.0)
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
s = Constant(-1.0)
beta = Constant(32.0)
h = CellDiameter(mesh)
h_avg = avg(h)
# Classical term
a = dot(grad(p), grad(q)) * dx
L = f * q * dx
# Hybridization terms
a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# Boundary terms
# a += -dot(vel_projected, n) * v * ds # How to set this bc??
# a += (beta / h) * (p- p_boundaries) * q * ds # is this necessary?
L += s * dot(grad(q), n) * p_boundaries * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_lsh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# BCs
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0)
beta = beta_0 / h
beta_avg = beta_0 / h("+")
# Stabilizing parameter
# delta_0 = Constant(1)
# delta_1 = Constant(1)
# delta_2 = Constant(1)
# delta_3 = Constant(1)
# delta_4 = Constant(1)
# delta_5 = Constant(1)
# LARGE_NUMBER = Constant(1e0)
delta = h * h
# delta = Constant(1)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = delta
delta_4 = delta
# delta_4 = LARGE_NUMBER / h
delta_5 = delta
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
v_hat = v + beta * (q - mu_h) * n
# Flux least-squares
# a = (
# (inner(u, v) - q * div(u) - p * div(v) + inner(grad(p), grad(q)))
# * delta_1
# * dx
# )
# # These terms below are unsymmetric
# a += delta_1 * jump(u_hat, n=n) * q("+") * dS
# a += delta_1("+") * dot(u_hat, n) * q * ds
# # a += delta_1 * dot(u, n) * q * ds
# # L = -delta_1 * dot(u_projected, n) * q * ds
# a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
# a += delta_1 * lambda_h * dot(v, n) * ds
# # L = delta_1 * p_exact * dot(v, n) * ds
# Flux Least-squares as in DG
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
# Classical mixed Darcy eq. first-order terms as stabilizing terms
a += delta_1 * (dot(u, v) - div(v) * p) * dx
a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
a += delta_1 * lambda_h * dot(v, n) * ds
# Mass balance least-square
a += delta_2 * div(u) * div(v) * dx
# L = delta_2 * f * div(v) * dx
# Irrotational least-squares
a += delta_3 * inner(curl(u), curl(v)) * dx
# Hybridization terms
a += mu_h("+") * jump(u_hat, n=n) * dS
a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# a += delta_4 * (p - lambda_h) * (q - mu_h) * ds
# a += delta_5 * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
# a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds
# Weakly imposed BC from hybridization
# a += mu_h * (lambda_h - p_boundaries) * ds
# a += mu_h * lambda_h * ds
# ###
# a += (
# (mu_h - q) * (lambda_h - p_boundaries) * ds
    # ) # maybe this is not a good way to impose BC, but it is necessary
_A = Tensor(a)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def hp_refinement_cond_number_calculation(
solver,
min_degree=1,
max_degree=4,
numel_xy=(5, 10, 15, 20, 25),
quadrilateral=True,
name="",
**kwargs
):
results_dict = {
"Element": list(),
"Number of Elements": list(),
"Degree": list(),
"Symmetric": list(),
"nnz": list(),
"dofs": list(),
"h": list(),
"Condition Number": list(),
}
element_kind = "Quad" if quadrilateral else "Tri"
pbar = tqdm(range(min_degree, max_degree))
for degree in pbar:
for n in numel_xy:
pbar.set_description(f"Processing {name} - degree = {degree} - N = {n}")
mesh = UnitSquareMesh(n, n, quadrilateral=quadrilateral)
result = solver(mesh, degree=degree)
current_cell_size = mesh.cell_sizes.dat.data_ro.min() if not quadrilateral else 1 / n
results_dict["Element"].append(element_kind)
results_dict["Number of Elements"].append(n * n)
results_dict["Degree"].append(degree)
results_dict["Symmetric"].append(result.is_operator_symmetric)
results_dict["nnz"].append(result.nnz)
results_dict["dofs"].append(result.number_of_dofs)
results_dict["h"].append(current_cell_size)
results_dict["Condition Number"].append(result.condition_number)
os.makedirs("./cond_number_results/results_%s" % name, exist_ok=True)
    df_cond_number = pd.DataFrame(data=results_dict)
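# Hedged usage sketch (illustrative, not part of the original function): how the
# hp-refinement driver above might be invoked for one of the solvers defined earlier.
#   hp_refinement_cond_number_calculation(solve_poisson_cg, max_degree=3, numel_xy=(4, 8), name="cg")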
from unittest import TestCase
from numpy import ndarray
from pandas import DataFrame, Timestamp
from SModelWrap import ModelPerClass
class FakeModel():
"""
    FakeModel: just returns the values of the first column.
"""
def predict(self, x_val: DataFrame) -> ndarray:
column = x_val.columns.tolist()[0]
return x_val[column].to_numpy()
class TestThings(TestCase):
@classmethod
def setUpClass(cls) -> None:
data = ["5", "4", "3", "3", "2", "2", "1", "1"]
index = [
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:02'),
            Timestamp('20130101 09:00:03'),
# Quandl for financial analysis, pandas and numpy for data manipulation
# fbprophet for additive models, #pytrends for Google trend data
import quandl
import pandas as pd
import numpy as np
import fbprophet
import pytrends
import datetime
from pytrends.request import TrendReq
from pandas_datareader import data as pddata
# matplotlib pyplot for plotting
import matplotlib.pyplot as plt
import matplotlib
# Class for analyzing and (attempting) to predict future prices
# Contains a number of visualizations and analysis methods
class Stocker():
# Initialization requires a ticker symbol
def __init__(self, ticker, exchange='WIKI', source="quandl"):
# Enforce capitalization
ticker = ticker.upper()
# Symbol is used for labeling plots
self.symbol = ticker
# Use Personal Api Key
# quandl.ApiConfig.api_key = 'YourKeyHere'
# Retrieval the financial data
if source.lower() == "quandl":
try:
stock = quandl.get('%s/%s' % (exchange, ticker))
except Exception as e:
                print('Error Retrieving Data from Quandl.')
print(e)
return
elif source.lower() == "yahoo":
try:
start = datetime.datetime(2001, 1, 1)
end = datetime.date.today()
stock = pddata.DataReader(ticker, 'yahoo', start, end)
except Exception as e:
print('Error Retrieving Data from Yahoo.')
print(e)
return
else:
print ("Error: Unknown Source")
return
# Set the index to a column called Date
stock = stock.reset_index(level=0)
# Columns required for prophet
stock['ds'] = stock['Date']
if ('Adj. Close' not in stock.columns):
stock['Adj. Close'] = stock['Close']
stock['Adj. Open'] = stock['Open']
stock['y'] = stock['Adj. Close']
stock['Daily Change'] = stock['Adj. Close'] - stock['Adj. Open']
# Data assigned as class attribute
self.stock = stock.copy()
# Minimum and maximum date in range
self.min_date = min(stock['Date'])
self.max_date = max(stock['Date'])
# Find max and min prices and dates on which they occurred
self.max_price = np.max(self.stock['y'])
self.min_price = np.min(self.stock['y'])
self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
# The starting price (starting with the opening price)
        self.starting_price = float(self.stock.loc[0, 'Adj. Open'])
        # The most recent price
        self.most_recent_price = float(self.stock.loc[len(self.stock) - 1, 'y'])
# Whether or not to round dates
self.round_dates = True
# Number of years of data to train on
self.training_years = 3
# Prophet parameters
# Default prior from library
self.changepoint_prior_scale = 0.05
self.weekly_seasonality = False
self.daily_seasonality = False
self.monthly_seasonality = True
self.yearly_seasonality = True
self.changepoints = None
print('{} Stocker Initialized. Data covers {} to {}.'.format(self.symbol,
self.min_date.date(),
self.max_date.date()))
"""
Make sure start and end dates are in the range and can be
converted to pandas datetimes. Returns dates in the correct format
"""
def handle_dates(self, start_date, end_date):
# Default start and end date are the beginning and end of data
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
try:
# Convert to pandas datetime for indexing dataframe
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
except Exception as e:
print('Enter valid pandas date format.')
print(e)
return
valid_start = False
valid_end = False
# User will continue to enter dates until valid dates are met
while (not valid_start) & (not valid_end):
valid_end = True
valid_start = True
if end_date.date() < start_date.date():
print('End Date must be later than start date.')
start_date = pd.to_datetime(input('Enter a new start date: '))
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
valid_start = False
else:
if end_date.date() > self.max_date.date():
print('End Date exceeds data range')
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
if start_date.date() < self.min_date.date():
print('Start Date is before date range')
start_date = pd.to_datetime(input('Enter a new start date: '))
valid_start = False
return start_date, end_date
"""
Return the dataframe trimmed to the specified range.
"""
def make_df(self, start_date, end_date, df=None):
# Default is to use the object stock data
        if df is None:
df = self.stock.copy()
start_date, end_date = self.handle_dates(start_date, end_date)
# keep track of whether the start and end dates are in the data
start_in = True
end_in = True
# If user wants to round dates (default behavior)
if self.round_dates:
# Record if start and end date are in df
if (start_date not in list(df['Date'])):
start_in = False
if (end_date not in list(df['Date'])):
end_in = False
# If both are not in dataframe, round both
if (not end_in) & (not start_in):
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] <= end_date.date())]
else:
# If both are in dataframe, round neither
if (end_in) & (start_in):
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] <= end_date.date())]
else:
# If only start is missing, round start
if (not start_in):
trim_df = df[(df['Date'] > start_date.date()) &
(df['Date'] <= end_date.date())]
                    # If only end is missing, round end
elif (not end_in):
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] < end_date.date())]
else:
valid_start = False
valid_end = False
while (not valid_start) & (not valid_end):
start_date, end_date = self.handle_dates(start_date, end_date)
# No round dates, if either data not in, print message and return
if (start_date in list(df['Date'])):
valid_start = True
if (end_date in list(df['Date'])):
valid_end = True
# Check to make sure dates are in the data
                if (start_date not in list(df['Date'])):
                    print('Start Date not in data (either out of range or not a trading day).')
                    start_date = pd.to_datetime(input('Enter a new start date: '))
                elif (end_date not in list(df['Date'])):
                    print('End Date not in data (either out of range or not a trading day).')
                    end_date = pd.to_datetime(input('Enter a new end date: '))
# Dates are not rounded
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] <= end_date.date())]
return trim_df
# Basic Historical Plots and Basic Statistics
def plot_stock(self, start_date=None, end_date=None, stats=['Adj. Close'], plot_type='basic'):
self.reset_plot()
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
stock_plot = self.make_df(start_date, end_date)
colors = ['r', 'b', 'g', 'y', 'c', 'm']
for i, stat in enumerate(stats):
stat_min = min(stock_plot[stat])
stat_max = max(stock_plot[stat])
stat_avg = np.mean(stock_plot[stat])
date_stat_min = stock_plot[stock_plot[stat] == stat_min]['Date']
date_stat_min = date_stat_min[date_stat_min.index[0]].date()
date_stat_max = stock_plot[stock_plot[stat] == stat_max]['Date']
date_stat_max = date_stat_max[date_stat_max.index[0]].date()
print('Maximum {} = {:.2f} on {}.'.format(stat, stat_max, date_stat_max))
print('Minimum {} = {:.2f} on {}.'.format(stat, stat_min, date_stat_min))
            print('Current {} = {:.2f} on {}.\n'.format(stat, self.stock.loc[len(self.stock) - 1, stat], self.max_date.date()))
# Percentage y-axis
if plot_type == 'pct':
# Simple Plot
plt.style.use('fivethirtyeight');
if stat == 'Daily Change':
plt.plot(stock_plot['Date'], 100 * stock_plot[stat],
color = colors[i], linewidth = 2.4, alpha = 0.9,
label = stat)
else:
plt.plot(stock_plot['Date'], 100 * (stock_plot[stat] - stat_avg) / stat_avg,
color = colors[i], linewidth = 2.4, alpha = 0.9,
label = stat)
plt.xlabel('Date'); plt.ylabel('Change Relative to Average (%)'); plt.title('%s Stock History' % self.symbol);
plt.legend(prop={'size':10})
plt.grid(color = 'k', alpha = 0.4);
# Stat y-axis
elif plot_type == 'basic':
plt.style.use('fivethirtyeight');
plt.plot(stock_plot['Date'], stock_plot[stat], color = colors[i], linewidth = 3, label = stat, alpha = 0.8)
plt.xlabel('Date'); plt.ylabel('US $'); plt.title('%s Stock History' % self.symbol);
plt.legend(prop={'size':10})
plt.grid(color = 'k', alpha = 0.4);
plt.show();
# Reset the plotting parameters to clear style formatting
# Not sure if this should be a static method
@staticmethod
def reset_plot():
# Restore default parameters
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
# Adjust a few parameters to liking
matplotlib.rcParams['figure.figsize'] = (8, 5)
matplotlib.rcParams['axes.labelsize'] = 10
matplotlib.rcParams['xtick.labelsize'] = 8
matplotlib.rcParams['ytick.labelsize'] = 8
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['text.color'] = 'k'
# Method to linearly interpolate prices on the weekends
def resample(self, dataframe):
# Change the index and resample at daily level
dataframe = dataframe.set_index('ds')
dataframe = dataframe.resample('D')
# Reset the index and interpolate nan values
dataframe = dataframe.reset_index(level=0)
dataframe = dataframe.interpolate()
return dataframe
# Remove weekends from a dataframe
def remove_weekends(self, dataframe):
# Reset index to use ix
dataframe = dataframe.reset_index(drop=True)
weekends = []
# Find all of the weekends
for i, date in enumerate(dataframe['ds']):
            if (date.weekday() == 5) or (date.weekday() == 6):
weekends.append(i)
# Drop the weekends
dataframe = dataframe.drop(weekends, axis=0)
return dataframe
# Calculate and plot profit from buying and holding shares for specified date range
def buy_and_hold(self, start_date=None, end_date=None, nshares=1):
self.reset_plot()
start_date, end_date = self.handle_dates(start_date, end_date)
# Find starting and ending price of stock
start_price = float(self.stock[self.stock['Date'] == start_date]['Adj. Open'])
end_price = float(self.stock[self.stock['Date'] == end_date]['Adj. Close'])
# Make a profit dataframe and calculate profit column
profits = self.make_df(start_date, end_date)
profits['hold_profit'] = nshares * (profits['Adj. Close'] - start_price)
# Total profit
total_hold_profit = nshares * (end_price - start_price)
print('{} Total buy and hold profit from {} to {} for {} shares = ${:.2f}'.format
(self.symbol, start_date.date(), end_date.date(), nshares, total_hold_profit))
# Plot the total profits
plt.style.use('dark_background')
# Location for number of profit
text_location = (end_date - pd.DateOffset(months = 1)).date()
# Plot the profits over time
plt.plot(profits['Date'], profits['hold_profit'], 'b', linewidth = 3)
plt.ylabel('Profit ($)'); plt.xlabel('Date'); plt.title('Buy and Hold Profits for {} {} to {}'.format(
self.symbol, start_date.date(), end_date.date()))
# Display final value on graph
plt.text(x = text_location,
y = total_hold_profit + (total_hold_profit / 40),
s = '$%d' % total_hold_profit,
color = 'g' if total_hold_profit > 0 else 'r',
size = 14)
plt.grid(alpha=0.2)
plt.show();
# Create a prophet model without training
def create_model(self):
# Make the model
model = fbprophet.Prophet(daily_seasonality=self.daily_seasonality,
weekly_seasonality=self.weekly_seasonality,
yearly_seasonality=self.yearly_seasonality,
changepoint_prior_scale=self.changepoint_prior_scale,
changepoints=self.changepoints)
if self.monthly_seasonality:
# Add monthly seasonality
model.add_seasonality(name = 'monthly', period = 30.5, fourier_order = 5)
return model
# Graph the effects of altering the changepoint prior scale (cps)
def changepoint_prior_analysis(self, changepoint_priors=[0.001, 0.05, 0.1, 0.2], colors=['b', 'r', 'grey', 'gold']):
# Training and plotting with specified years of data
train = self.stock[(self.stock['Date'] > (max(self.stock['Date']) - pd.DateOffset(years=self.training_years)).date())]
# Iterate through all the changepoints and make models
for i, prior in enumerate(changepoint_priors):
# Select the changepoint
self.changepoint_prior_scale = prior
# Create and train a model with the specified cps
model = self.create_model()
model.fit(train)
future = model.make_future_dataframe(periods=180, freq='D')
# Make a dataframe to hold predictions
if i == 0:
predictions = future.copy()
future = model.predict(future)
# Fill in prediction dataframe
predictions['%.3f_yhat_upper' % prior] = future['yhat_upper']
predictions['%.3f_yhat_lower' % prior] = future['yhat_lower']
predictions['%.3f_yhat' % prior] = future['yhat']
# Remove the weekends
predictions = self.remove_weekends(predictions)
# Plot set-up
self.reset_plot()
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots(1, 1)
# Actual observations
ax.plot(train['ds'], train['y'], 'ko', ms = 4, label = 'Observations')
color_dict = {prior: color for prior, color in zip(changepoint_priors, colors)}
# Plot each of the changepoint predictions
for prior in changepoint_priors:
# Plot the predictions themselves
ax.plot(predictions['ds'], predictions['%.3f_yhat' % prior], linewidth = 1.2,
color = color_dict[prior], label = '%.3f prior scale' % prior)
# Plot the uncertainty interval
ax.fill_between(predictions['ds'].dt.to_pydatetime(), predictions['%.3f_yhat_upper' % prior],
predictions['%.3f_yhat_lower' % prior], facecolor = color_dict[prior],
alpha = 0.3, edgecolor = 'k', linewidth = 0.6)
# Plot labels
plt.legend(loc = 2, prop={'size': 10})
plt.xlabel('Date'); plt.ylabel('Stock Price ($)'); plt.title('Effect of Changepoint Prior Scale');
plt.show()
# Basic prophet model for specified number of days
def create_prophet_model(self, days=0, resample=False):
self.reset_plot()
model = self.create_model()
# Fit on the stock history for self.training_years number of years
stock_history = self.stock[self.stock['Date'] > (self.max_date - pd.DateOffset(years = self.training_years)).date()]
if resample:
stock_history = self.resample(stock_history)
model.fit(stock_history)
# Make and predict for next year with future dataframe
future = model.make_future_dataframe(periods = days, freq='D')
future = model.predict(future)
if days > 0:
# Print the predicted price
print('Predicted Price on {} = ${:.2f}'.format(
                future.loc[len(future) - 1, 'ds'].date(), future.loc[len(future) - 1, 'yhat']))
title = '%s Historical and Predicted Stock Price' % self.symbol
else:
title = '%s Historical and Modeled Stock Price' % self.symbol
# Set up the plot
fig, ax = plt.subplots(1, 1)
# Plot the actual values
ax.plot(stock_history['ds'], stock_history['y'], 'ko-', linewidth = 1.4, alpha = 0.8, ms = 1.8, label = 'Observations')
# Plot the predicted values
ax.plot(future['ds'], future['yhat'], 'forestgreen',linewidth = 2.4, label = 'Modeled');
# Plot the uncertainty interval as ribbon
ax.fill_between(future['ds'].dt.to_pydatetime(), future['yhat_upper'], future['yhat_lower'], alpha = 0.3,
facecolor = 'g', edgecolor = 'k', linewidth = 1.4, label = 'Confidence Interval')
# Plot formatting
plt.legend(loc = 2, prop={'size': 10}); plt.xlabel('Date'); plt.ylabel('Price $');
plt.grid(linewidth=0.6, alpha = 0.6)
plt.title(title);
plt.show()
return model, future
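    # Hedged usage sketch (illustrative only, not part of the original class; a valid ticker,
    # Quandl API access and network connectivity are assumptions):
    #   stocker = Stocker('MSFT')
    #   stocker.plot_stock(stats=['Adj. Close'], plot_type='basic')
    #   model, future = stocker.create_prophet_model(days=30)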
# Evaluate prediction model for one year
def evaluate_prediction(self, start_date=None, end_date=None, nshares = None):
# Default start date is one year before end of data
# Default end date is end date of data
if start_date is None:
start_date = self.max_date - pd.DateOffset(years=1)
if end_date is None:
end_date = self.max_date
start_date, end_date = self.handle_dates(start_date, end_date)
# Training data starts self.training_years years before start date and goes up to start date
train = self.stock[(self.stock['Date'] < start_date.date()) &
                           (self.stock['Date'] > (start_date - pd.DateOffset(years=self.training_years)).date())]
"""GPX
This module manages load and save operations on GPX files.
Author: alguerre
License: MIT
"""
import os
import pandas as pd
import numpy as np
import gpxpy
from libs.constants import Constants as c
class LoadGpxError(Exception):
pass
class Gpx:
"""
Management of load and save operations for GPX files.
"""
def __init__(self):
# Private attributes
self.filename = None
self.filepath = None
self._state = False
self._gpx = None
self._gpx_dict = None
# Public attributes
self.df = None
@classmethod
def from_path(cls, filepath: str):
gpx = cls()
gpx.filename = os.path.basename(filepath)
gpx.filepath = os.path.abspath(filepath)
if os.stat(filepath).st_size >= c.maximum_file_size:
raise LoadGpxError(f'Too big file: {gpx.filename}')
try:
with open(filepath, 'r') as gpx_file:
gpx._gpx = gpxpy.parse(gpx_file)
return gpx
except Exception as e:
raise LoadGpxError(f'Not able to load {gpx.filename} - {e}')
@classmethod
def from_bytes(cls, file: bytes, filename: str):
gpx = cls()
gpx.filename = filename
try:
gpx._gpx = gpxpy.parse(file)
return gpx
except Exception as e:
raise LoadGpxError(f'Not able to load {gpx.filename} - {e}')
def to_dict(self):
self._gpx_dict = {'lat': [], 'lon': [], 'ele': [], 'time': [],
'track': [], 'segment': []}
n_tracks = len(self._gpx.tracks)
n_segments = [len(track.segments) for track in self._gpx.tracks]
for i_track in range(n_tracks):
for i_seg in range(n_segments[i_track]):
for i_point in self._gpx.tracks[i_track].segments[i_seg].points:
self._gpx_dict['lat'].append(i_point.latitude)
self._gpx_dict['lon'].append(i_point.longitude)
self._gpx_dict['ele'].append(i_point.elevation if i_point.elevation else np.nan)
self._gpx_dict['time'].append(i_point.time if i_point.time else np.nan)
                    self._gpx_dict['track'].append(i_track)
                    self._gpx_dict['segment'].append(i_seg)
return self._gpx_dict
def to_pandas(self):
if not self._gpx_dict:
self.to_dict()
self.df = pd.DataFrame(self._gpx_dict,
columns=['lat', 'lon', 'ele',
'time', 'track', 'segment'])
        self.df['time'] = pd.to_datetime(self.df['time'], utc=True)
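    # Hedged usage sketch (illustrative, not part of the original module; 'route.gpx' is an
    # assumed file path):
    #   gpx = Gpx.from_path('route.gpx')
    #   gpx.to_pandas()
    #   print(gpx.df[['lat', 'lon', 'ele', 'time']].head())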
# Author: <NAME>
# Date: 26 November 2016
# Python version: 3.5
# Updated June 2018 by <NAME> (KTH dESA)
# Modified grid algorithm and population calibration to improve computational speed
import logging
import pandas as pd
from math import pi, exp, log, sqrt, ceil
# from pyproj import Proj
import numpy as np
from collections import defaultdict
import os
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# general
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in metres/kilometres
SET_Y = 'Y' # Coordinate in metres/kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartCalibrated' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopFuture' # Project future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'VIIRS' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID' # the unique tag for each hydropower site, to avoid over-utilising it
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_SOLAR_RESTRICTION = 'SolarRestriction'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_HH = 'EnergyPerHH'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'ElecFuture' # If the site has the potential to be 'easily' electrified in future
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_NEW_CONNECTIONS_PROD = 'New_Connections_Prod' # Number of new people with electricity connections, plus the corresponding productive uses
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_LCOE_MG_HYBRID = 'MG_Hybrid'
SET_MIN_OFFGRID = 'MinimumOffgrid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'MinimumTechLCOE' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'Pop2016' # The actual population in the base year
SPE_URBAN = 'UrbanRatio2016' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'Pop2030'
SPE_URBAN_FUTURE = 'UrbanRatio2030'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff' # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth' # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh'  # Same, but for a high forecast variant
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost'  # Grid capacity investment costs from TEMBA, in USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
class Technology:
"""
Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
input parameters.
"""
start_year = 2016
end_year = 2030
discount_rate = 0.08
grid_cell_area = 1 # in km2, normally 1km2
mv_line_cost = 9000 # USD/km
lv_line_cost = 5000 # USD/km
mv_line_capacity = 50 # kW/line
lv_line_capacity = 10 # kW/line
lv_line_max_length = 30 # km
hv_line_cost = 53000 # USD/km
mv_line_max_length = 50 # km
hv_lv_transformer_cost = 5000 # USD/unit
mv_increase_rate = 0.1 # percentage
def __init__(self,
tech_life, # in years
base_to_peak_load_ratio,
distribution_losses=0, # percentage
connection_cost_per_hh=0, # USD/hh
om_costs=0.0, # OM costs as percentage of capital costs
capital_cost=0, # USD/kW
capacity_factor=1.0, # percentage
efficiency=1.0, # percentage
diesel_price=0.0, # USD/litre
grid_price=0.0, # USD/kWh for grid electricity
standalone=False,
mg_pv=False,
mg_wind=False,
mg_diesel=False,
mg_hydro=False,
grid_capacity_investment=0.0, # USD/kW for on-grid capacity investments (excluding grid itself)
diesel_truck_consumption=0, # litres/hour
diesel_truck_volume=0, # litres
om_of_td_lines=0): # percentage
self.distribution_losses = distribution_losses
self.connection_cost_per_hh = connection_cost_per_hh
self.base_to_peak_load_ratio = base_to_peak_load_ratio
self.tech_life = tech_life
self.om_costs = om_costs
self.capital_cost = capital_cost
self.capacity_factor = capacity_factor
self.efficiency = efficiency
self.diesel_price = diesel_price
self.grid_price = grid_price
self.standalone = standalone
self.mg_pv = mg_pv
self.mg_wind = mg_wind
self.mg_diesel = mg_diesel
self.mg_hydro = mg_hydro
self.grid_capacity_investment = grid_capacity_investment
self.diesel_truck_consumption = diesel_truck_consumption
self.diesel_truck_volume = diesel_truck_volume
self.om_of_td_lines = om_of_td_lines
def pv_diesel_hybrid(self,
energy_per_hh, # kWh/household/year as defined
max_ghi, # highest annual GHI value encountered in the GIS data
max_travel_hours, # highest value for travel hours encountered in the GIS data
diesel_no=1, # 50, # number of diesel generators simulated
pv_no=1, #70, # number of PV panel sizes simulated
n_chg=0.92, # charge efficiency of battery
n_dis=0.92, # discharge efficiency of battery
lpsp=0.05, # maximum loss of load allowed over the year, in share of kWh
                         battery_cost=150,  # battery capital cost, USD/kWh of storage capacity
pv_cost=2490, # PV panel capital cost, USD/kW peak power
diesel_cost=550, # diesel generator capital cost, USD/kW rated power
pv_life=20, # PV panel expected lifetime, years
diesel_life=15, # diesel generator expected lifetime, years
pv_om=0.015, # annual OM cost of PV panels
diesel_om=0.1, # annual OM cost of diesel generator
k_t=0.005): # temperature factor of PV panels
        ghi = pd.read_csv('Supplementary_files/GHI_hourly.csv', usecols=[4], sep=';', skiprows=21).values
# hourly GHI values downloaded from SoDa for one location in the country
        temp = pd.read_csv('Supplementary_files/Temperature_hourly.csv', usecols=[4], sep=';', skiprows=21).values
# hourly temperature values downloaded from SoDa for one location in the country
hour_numbers = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23) * 365
        LHV_DIESEL = 9.9445485  # lower heating value of diesel, in kWh per litre
dod_max = 0.8 # maximum depth of discharge of battery
# the values below define the load curve for the five tiers. The values reflect the share of the daily demand
# expected in each hour of the day (sum of all values for one tier = 1)
tier5_load_curve = np.array([0.021008403, 0.021008403, 0.021008403, 0.021008403, 0.027310924, 0.037815126,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.046218487, 0.050420168,
0.067226891, 0.084033613, 0.073529412, 0.052521008, 0.033613445, 0.023109244])
tier4_load_curve = np.array([0.017167382, 0.017167382, 0.017167382, 0.017167382, 0.025751073, 0.038626609,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.0472103, 0.051502146,
0.068669528, 0.08583691, 0.075107296, 0.053648069, 0.034334764, 0.021459227])
tier3_load_curve = np.array([0.013297872, 0.013297872, 0.013297872, 0.013297872, 0.019060284, 0.034574468,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.048758865, 0.053191489,
0.070921986, 0.088652482, 0.077570922, 0.055407801, 0.035460993, 0.019946809])
tier2_load_curve = np.array([0.010224949, 0.010224949, 0.010224949, 0.010224949, 0.019427403, 0.034764826,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.04601227, 0.056237219,
0.081799591, 0.102249489, 0.089468303, 0.06390593, 0.038343558, 0.017893661])
tier1_load_curve = np.array([0, 0, 0, 0, 0.012578616, 0.031446541, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.044025157, 0.062893082, 0.100628931, 0.125786164, 0.110062893,
0.078616352, 0.044025157, 0.012578616])
if energy_per_hh < 75:
load_curve = tier1_load_curve * energy_per_hh / 365
elif energy_per_hh < 365:
load_curve = tier2_load_curve * energy_per_hh / 365
elif energy_per_hh < 1241:
load_curve = tier3_load_curve * energy_per_hh / 365
elif energy_per_hh < 2993:
load_curve = tier4_load_curve * energy_per_hh / 365
else:
load_curve = tier5_load_curve * energy_per_hh / 365
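        # load_curve now holds the average daily demand profile in kWh per hour: energy_per_hh / 365
        # is the average daily consumption per household, and each tier curve spreads that daily total
        # over the 24 hours (the shares in each tier array sum to 1).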
        def pv_diesel_capacities(pv_capacity, battery_size, diesel_capacity, initial_condition=False):
condition = 1
ren_limit = 0
break_hour = 17
while condition > lpsp:
dod = np.zeros(24)
battery_use = np.zeros(24) # Stores the amount of battery discharge during the day
fuel_result = 0
battery_life = 0
soc = 0.5
unmet_demand = 0
annual_diesel_gen = 0
for i in range(8760):
diesel_gen = 0
battery_use[hour_numbers[i]] = 0.0002 * soc # Battery self-discharge
soc *= 0.9998
t_cell = temp[i] + 0.0256 * ghi[i] # PV cell temperature
pv_gen = pv_capacity * 0.9 * ghi[i] / 1000 * (
1 - k_t * (t_cell - 298.15)) # PV generation in the hour
net_load = load_curve[hour_numbers[i]] - pv_gen # remaining load not met by PV panels
if net_load <= 0: # If pv generation is greater than load the excess energy is stored in battery
if battery_size > 0:
soc -= n_chg * net_load / battery_size
net_load = 0
max_diesel = min(diesel_capacity, net_load + (1 - soc) * battery_size / n_chg)
                    # Maximum amount of diesel needed to supply the load and charge the battery, limited by rated diesel capacity
                    # Below is the dispatch strategy for the diesel generator, as described in the accompanying Word document
if break_hour + 1 > hour_numbers[i] > 4 and net_load > soc * battery_size * n_dis:
diesel_gen = min(diesel_capacity, max(0.4 * diesel_capacity, net_load))
elif 23 > hour_numbers[i] > break_hour and max_diesel > 0.40 * diesel_capacity:
diesel_gen = max_diesel
elif n_dis * soc * battery_size < net_load:
diesel_gen = max(0.4 * diesel_capacity, max_diesel)
if diesel_gen > 0: # Fuel consumption is stored
fuel_result += diesel_capacity * 0.08145 + diesel_gen * 0.246
annual_diesel_gen += diesel_gen
if (net_load - diesel_gen) > 0: # If diesel generator cannot meet load the battery is also used
if battery_size > 0:
soc -= (net_load - diesel_gen) / n_dis / battery_size
battery_use[hour_numbers[i]] += (net_load - diesel_gen) / n_dis / battery_size
if soc < 0: # If battery and diesel generator cannot supply load there is unmet demand
unmet_demand -= soc * n_dis * battery_size
battery_use[hour_numbers[i]] += soc
soc = 0
else: # If diesel generation is larger than load the excess energy is stored in battery
if battery_size > 0:
soc += (diesel_gen - net_load) * n_chg / battery_size
if battery_size == 0: # If no battery and diesel generation < net load there is unmet demand
unmet_demand += net_load - diesel_gen
soc = min(soc, 1) # Battery state of charge cannot be >1
                    dod[hour_numbers[i]] = 1 - soc  # The depth of discharge in every hour of the day is stored
if hour_numbers[i] == 23 and max(dod) > 0: # The battery wear during the last day is calculated
battery_life += sum(battery_use) / (531.52764 * max(0.1, (max(dod) * dod_max)) ** -1.12297)
condition = unmet_demand / energy_per_hh # lpsp is calculated
                if initial_condition:  # During the first calculation the minimum PV size with no diesel generator is calculated
if condition > lpsp:
pv_capacity *= (1 + unmet_demand / energy_per_hh / 4)
elif condition > lpsp or (annual_diesel_gen > (1 - ren_limit) * energy_per_hh): # For the remaining configurations the solution is considered unusable if lpsp criteria is not met
diesel_capacity = 99
condition = 0
battery_life = 1
elif condition < lpsp: # If lpsp criteria is met the expected battery life is stored
battery_life = np.round(1 / battery_life)
return pv_capacity, diesel_capacity, battery_size, fuel_result, battery_life
# Initial PV size when no diesel generator is used is calculated and used as reference
        ref = pv_diesel_capacities(energy_per_hh / 3000, 2 * energy_per_hh / 365, 0, initial_condition=True)
battery_sizes = [0.3 * energy_per_hh / 365, 0.5 * energy_per_hh / 365, 0.75 * energy_per_hh / 365, energy_per_hh / 365, 2 * energy_per_hh / 365, 0] # [2 * energy_per_hh / 365, energy_per_hh / 365, 0]
ref_battery_size = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_panel_size = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_diesel_cap = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_fuel_result = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_battery_life = np.zeros((len(battery_sizes), pv_no, diesel_no))
        # For each combination of diesel, PV and battery capacity, the lpsp, battery lifetime and fuel usage are calculated
for k in range(len(battery_sizes)):
for i in range(pv_no):
for j in range(diesel_no):
a = pv_diesel_capacities(ref[0] * (pv_no - i) / pv_no, battery_sizes[k],
j * max(load_curve) / diesel_no)
ref_panel_size[k, i, j] = a[0]
ref_diesel_cap[k, i, j] = a[1]
ref_battery_size[k, i, j] = a[2]
ref_fuel_result[k, i, j] = a[3]
ref_battery_life[k, i, j] = min(20, a[4]) # Battery life limited to maximum 20 years
        # Necessary information for the calculation of LCOE is defined
project_life = self.end_year - self.start_year
ghi_steps = int(
ceil((max_ghi - 1000) / 50) + 1) # GHI values rounded to nearest 50 are used for reference matrix
diesel_cost_max = 2 * self.diesel_price * self.diesel_truck_consumption * max_travel_hours / self.diesel_truck_volume / LHV_DIESEL
diesel_steps = int(
ceil(diesel_cost_max * 100) + 1) # Diesel values rounded to 0.01 USD used for reference matrix
generation = np.ones(project_life) * energy_per_hh
generation[0] = 0
year = np.arange(project_life)
discount_factor = (1 + self.discount_rate) ** year
investment_table = np.zeros((ghi_steps, diesel_steps)) # Stores least-cost configuration investments
        pv_table = np.zeros((ghi_steps, diesel_steps))  # Stores PV size for least-cost configuration
diesel_table = np.zeros((ghi_steps, diesel_steps)) # Stores diesel capacity for least-cost configuration
lcoe_table = np.ones((ghi_steps, diesel_steps)) * 99 # Stores LCOE for least-cost configuration
choice_table = np.zeros((ghi_steps, diesel_steps))
# For each combination of GHI and diesel price the least costly configuration is calculated by iterating through
# the different configurations specified above
for i in range(ghi_steps):
pv_size = ref_panel_size * ghi.sum() / 1000 / (1000 + 50 * i)
for j in range(diesel_steps):
for k in range(pv_no):
for l in range(diesel_no):
for m in range(len(battery_sizes)):
investments = np.zeros(project_life)
salvage = np.zeros(project_life)
fuel_costs = np.ones(project_life) * ref_fuel_result[m, k, l] * (self.diesel_price + 0.01 * j)
investments[0] = pv_size[m, k, l] * pv_cost + ref_diesel_cap[m, k, l] * diesel_cost
salvage[-1] = ref_diesel_cap[m, k, l] * diesel_cost * (1 - project_life / diesel_life) + \
pv_size[m, k, l] * pv_cost * (1 - project_life / pv_life)
om = np.ones(project_life) * (
pv_size[m, k, l] * pv_cost * pv_om + ref_diesel_cap[m, k, l] * diesel_cost * diesel_om)
if pv_life < project_life:
investments[pv_life] = pv_size[m, k, l] * pv_cost
if diesel_life < project_life:
investments[diesel_life] = ref_diesel_cap[m, k, l] * diesel_cost
for n in range(project_life):
if year[n] % ref_battery_life[m, k, l] == 0:
investments[n] += ref_battery_size[m, k, l] * battery_cost / dod_max
salvage[-1] += (1 - (
(project_life % ref_battery_life[m, k, l]) / ref_battery_life[m, k, l])) * \
battery_cost * ref_battery_size[m, k, l] / dod_max + ref_diesel_cap[
m, k, l] * \
diesel_cost * (1 - (
project_life % diesel_life) / diesel_life) \
+ pv_size[m, k, l] * pv_cost * (1 - (project_life % pv_life) / pv_life)
discount_investments = (investments + fuel_costs - salvage + om) / discount_factor
discount_generation = generation / discount_factor
lcoe = np.sum(discount_investments) / np.sum(discount_generation)
if lcoe < lcoe_table[i, j]:
lcoe_table[i, j] = lcoe
pv_table[i, j] = pv_size[m, k, l]
diesel_table[i, j] = ref_diesel_cap[m, k, l]
investment_table[i, j] = np.sum(discount_investments)
choice_table[i, j] = (l + 1) * 10 + (k + 1) * 10000 + m + 1
# first number is PV size, second is diesel, third is battery
return lcoe_table, pv_table, diesel_table, investment_table, load_curve[19], choice_table
@classmethod
def set_default_values(cls, start_year, end_year, discount_rate, grid_cell_area, mv_line_cost, lv_line_cost,
mv_line_capacity, lv_line_capacity, lv_line_max_length, hv_line_cost, mv_line_max_length,
hv_lv_transformer_cost, mv_increase_rate):
cls.start_year = start_year
cls.end_year = end_year
cls.discount_rate = discount_rate
cls.grid_cell_area = grid_cell_area
cls.mv_line_cost = mv_line_cost
cls.lv_line_cost = lv_line_cost
cls.mv_line_capacity = mv_line_capacity
cls.lv_line_capacity = lv_line_capacity
cls.lv_line_max_length = lv_line_max_length
cls.hv_line_cost = hv_line_cost
cls.mv_line_max_length = mv_line_max_length
cls.hv_lv_transformer_cost = hv_lv_transformer_cost
cls.mv_increase_rate = mv_increase_rate
def get_lcoe(self, energy_per_hh, people, num_people_per_hh, additional_mv_line_length=0, capacity_factor=0,
mv_line_length=0, travel_hours=0, ghi=0, urban=0, get_capacity=False, mini_grid=False, pv=False,
urban_hybrid=0, rural_hybrid=0, get_investment_cost=False, mg_pv=False, mg_wind=False,
mg_hydro=False, mg_diesel=False, mg_hybrid=False):
"""
Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.
The only required parameters are energy_per_hh, people and num_people_per_hh
        additional_mv_line_length required for grid
capacity_factor required for PV and wind
mv_line_length required for hydro
travel_hours required for diesel
"""
if people == 0:
# If there are no people, the investment cost is zero.
if get_investment_cost:
return 0
# Otherwise we set the people low (prevent div/0 error) and continue.
else:
people = 0.00001
# If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
if capacity_factor == 0:
capacity_factor = self.capacity_factor
consumption = people / num_people_per_hh * energy_per_hh # kWh/year
average_load = consumption / (1 - self.distribution_losses) / HOURS_PER_YEAR # kW
if mg_hybrid and urban == 1:
peak_load = urban_hybrid[4] * consumption
# peak_load = people / num_people_per_hh * urban_hybrid[4] * (1 + self.distribution_losses)
elif mg_hybrid and urban == 0:
peak_load = rural_hybrid[4] * consumption
# peak_load = people / num_people_per_hh * rural_hybrid[4] * (1 + self.distribution_losses)
else:
peak_load = average_load / self.base_to_peak_load_ratio # kW
no_mv_lines = peak_load / self.mv_line_capacity
no_lv_lines = peak_load / self.lv_line_capacity
lv_networks_lim_capacity = no_lv_lines / no_mv_lines
lv_networks_lim_length = ((self.grid_cell_area / no_mv_lines) / (self.lv_line_max_length / sqrt(2))) ** 2
actual_lv_lines = min([people / num_people_per_hh, max([lv_networks_lim_capacity, lv_networks_lim_length])])
hh_per_lv_network = (people / num_people_per_hh) / (actual_lv_lines * no_mv_lines)
lv_unit_length = sqrt(self.grid_cell_area / (people / num_people_per_hh)) * sqrt(2) / 2
lv_lines_length_per_lv_network = 1.333 * hh_per_lv_network * lv_unit_length
total_lv_lines_length = no_mv_lines * actual_lv_lines * lv_lines_length_per_lv_network
line_reach = (self.grid_cell_area / no_mv_lines) / (2 * sqrt(self.grid_cell_area / no_lv_lines))
total_length_of_lines = min([line_reach, self.mv_line_max_length]) * no_mv_lines
additional_hv_lines = max(
[0, round(sqrt(self.grid_cell_area) / (2 * min([line_reach, self.mv_line_max_length])) / 10, 3) - 1])
hv_lines_total_length = (sqrt(self.grid_cell_area) / 2) * additional_hv_lines * sqrt(self.grid_cell_area)
num_transformers = additional_hv_lines + no_mv_lines + (no_mv_lines * actual_lv_lines)
generation_per_year = average_load * HOURS_PER_YEAR
# The investment and O&M costs are different for grid and non-grid solutions
if self.grid_price > 0 :
td_investment_cost = hv_lines_total_length * self.hv_line_cost + \
total_length_of_lines * self.mv_line_cost + \
total_lv_lines_length * self.lv_line_cost + \
num_transformers * self.hv_lv_transformer_cost + \
(people / num_people_per_hh) * self.connection_cost_per_hh + \
additional_mv_line_length * (
self.mv_line_cost * (1 + self.mv_increase_rate) **
((additional_mv_line_length / 5) - 1))
td_om_cost = td_investment_cost * self.om_of_td_lines
total_investment_cost = td_investment_cost
total_om_cost = td_om_cost
fuel_cost = self.grid_price
else:
total_lv_lines_length *= 0 if self.standalone else 0.75
mv_total_line_cost = self.mv_line_cost * mv_line_length
lv_total_line_cost = self.lv_line_cost * total_lv_lines_length
td_investment_cost = mv_total_line_cost + lv_total_line_cost + (
people / num_people_per_hh) * self.connection_cost_per_hh
td_om_cost = td_investment_cost * self.om_of_td_lines
installed_capacity = peak_load / capacity_factor
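        # Note: for the technology branches below, self.capital_cost is expected to be a dict mapping a
        # capacity threshold (kW per household for standalone systems, installed kW for mini-grids) to a
        # unit cost in USD/kW, so the lookups select the cost band that matches the system size.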
if self.standalone:
if self.diesel_price > 0:
if (installed_capacity / people / num_people_per_hh) < 1:
installed_capacity = 1 * people / num_people_per_hh
if installed_capacity / (people / num_people_per_hh) < 0.020:
capital_investment = installed_capacity * self.capital_cost[0.020]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.020] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.050:
capital_investment = installed_capacity * self.capital_cost[0.050]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.050] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.100:
capital_investment = installed_capacity * self.capital_cost[0.100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.100] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.200:
capital_investment = installed_capacity * self.capital_cost[0.200]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.200] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[0.300]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.300] * self.om_costs * installed_capacity)
elif self.mg_pv:
if installed_capacity < 50:
capital_investment = installed_capacity * self.capital_cost[50]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[50] * self.om_costs * installed_capacity)
elif installed_capacity < 75:
capital_investment = installed_capacity * self.capital_cost[75]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[75] * self.om_costs * installed_capacity)
elif installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[200]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[200] * self.om_costs * installed_capacity)
elif self.mg_wind:
if installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
elif installed_capacity < 1000:
capital_investment = installed_capacity * self.capital_cost[1000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1000] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[10000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[10000] * self.om_costs * installed_capacity)
elif self.mg_hydro:
if installed_capacity < 1:
capital_investment = installed_capacity * self.capital_cost[1]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1] * self.om_costs * installed_capacity)
elif installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[5000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[5000] * self.om_costs * installed_capacity)
elif self.mg_diesel:
if installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
elif installed_capacity < 1000:
capital_investment = installed_capacity * self.capital_cost[1000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1000] * self.om_costs * installed_capacity)
elif installed_capacity < 5000:
capital_investment = installed_capacity * self.capital_cost[5000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[5000] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[25000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[25000] * self.om_costs * installed_capacity)
elif mg_hybrid:
capital_investment = installed_capacity * self.capital_cost
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost * self.om_costs * installed_capacity)
# If a diesel price has been passed, the technology is diesel
if self.diesel_price > 0 and not mg_hybrid:
# And we apply the Szabo formula to calculate the transport cost for the diesel
# p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * travel_hours /
self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
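            # Illustrative example (values assumed, not taken from the model inputs): with a pump price of
            # 0.8 USD/litre, truck consumption of 14 litres/hour, 3 travel hours, a 15000 litre truck,
            # LHV_DIESEL ~ 9.94 kWh/litre and a generator efficiency of 0.33, this gives
            # (0.8 + 2 * 0.8 * 14 * 3 / 15000) / 9.94 / 0.33 ~ 0.245 USD/kWh of delivered electricity.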
# Otherwise it's hydro/wind etc with no fuel cost
else:
fuel_cost = 0
# Perform the time-value LCOE calculation
project_life = self.end_year - self.start_year
reinvest_year = 0
# If the technology life is less than the project life, we will have to invest twice to buy it again
if self.tech_life < project_life:
reinvest_year = self.tech_life
year = np.arange(project_life)
el_gen = generation_per_year * np.ones(project_life)
el_gen[0] = 0
discount_factor = (1 + self.discount_rate) ** year
investments = np.zeros(project_life)
investments[0] = total_investment_cost
if reinvest_year:
investments[reinvest_year] = total_investment_cost
salvage = np.zeros(project_life)
used_life = project_life
if reinvest_year:
# so salvage will come from the remaining life after the re-investment
used_life = project_life - self.tech_life
salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)
operation_and_maintenance = total_om_cost * np.ones(project_life)
operation_and_maintenance[0] = 0
fuel = el_gen * fuel_cost
fuel[0] = 0
if mg_hybrid:
diesel_lookup = int(round(2 * self.diesel_price * self.diesel_truck_consumption *
travel_hours / self.diesel_truck_volume / LHV_DIESEL * 100))
renewable_lookup = int(round((ghi - 1000) / 50))
if urban == 1 and pv:
ref_table = urban_hybrid[0]
ref_investments = urban_hybrid[3]
ref_capacity = urban_hybrid[1] + urban_hybrid[2]
elif urban == 0 and pv:
ref_table = rural_hybrid[0]
ref_investments = rural_hybrid[3]
ref_capacity = rural_hybrid[1] + rural_hybrid[2]
add_lcoe = ref_table[renewable_lookup, diesel_lookup]
add_investments = ref_investments[renewable_lookup, diesel_lookup] * people / num_people_per_hh
add_capacity = ref_capacity[renewable_lookup, diesel_lookup] * people / num_people_per_hh
# So we also return the total investment cost for this number of people
if get_investment_cost:
discounted_investments = investments / discount_factor
if mini_grid:
return add_investments + np.sum(discounted_investments)
else:
return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
# return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
elif get_capacity:
return add_capacity
else:
discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
discounted_generation = el_gen / discount_factor
if mini_grid:
return np.sum(discounted_costs) / np.sum(discounted_generation) + add_lcoe
else:
return np.sum(discounted_costs) / np.sum(discounted_generation)
# return np.sum(discounted_costs) / np.sum(discounted_generation)
def get_grid_table(self, energy_per_hh, num_people_per_hh, max_dist):
"""
        Uses get_lcoe() to generate a 2D grid with the grid LCOEs, for faster access in the electrification algorithm
"""
logging.info('Creating a grid table for {} kWh/hh/year'.format(energy_per_hh))
# Coarser resolution at the high end (just to catch the few places with exceptional population density)
# The electrification algorithm must round off with the same scheme
people_arr_direct = list(range(1000)) + list(range(1000, 10000, 10)) + list(range(10000, 350000, 1000))
elec_dists = range(0, int(max_dist) + 20) # add twenty to handle edge cases
grid_lcoes =
|
pd.DataFrame(index=elec_dists, columns=people_arr_direct)
|
pandas.DataFrame
|
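# A minimal, self-contained sketch (not part of the original model) of the discounted LCOE
# calculation that Technology.get_lcoe() performs above: discount the yearly cost and generation
# streams and take their ratio. All numbers below are illustrative assumptions.
import numpy as np

def simple_lcoe(capex, om_per_year, fuel_per_kwh, kwh_per_year, discount_rate, project_life):
    """Levelised cost of electricity in USD/kWh for a single investment made in year 0."""
    years = np.arange(project_life)
    discount = (1 + discount_rate) ** years
    generation = np.full(project_life, kwh_per_year, dtype=float)
    generation[0] = 0  # no generation in the investment year, as in get_lcoe()
    costs = np.full(project_life, om_per_year + fuel_per_kwh * kwh_per_year, dtype=float)
    costs[0] = capex  # capital cost up front, no O&M or fuel in year 0
    return np.sum(costs / discount) / np.sum(generation / discount)

# Example: a hypothetical 10 kW mini-grid PV system serving 50 households at 200 kWh/hh/year
print(simple_lcoe(capex=40000, om_per_year=800, fuel_per_kwh=0.0,
                  kwh_per_year=50 * 200, discount_rate=0.08, project_life=15))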
import collections
import logging
import os
import pprint
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as cartif
import core.signal_processing as csigna
import helpers.git as git
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class Test__compute_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_df()
output_df = csigna._compute_lagged_cumsum(input_df, 3)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 3, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test_lag_1(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 1, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_df() -> pd.DataFrame:
df = pd.DataFrame([list(range(10))] * 3).T
df[1] = df[0] + 1
df[2] = df[0] + 2
df.index = pd.date_range(start="2010-01-01", periods=10)
df.rename(columns=lambda x: f"col_{x}", inplace=True)
return df
class Test_correlate_with_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1", "y2"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1"], x_vars=["x"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_arma_df(seed: int = 0) -> pd.DataFrame:
arma_process = cartif.ArmaProcess([], [])
date_range = {"start": "2010-01-01", "periods": 40, "freq": "M"}
srs1 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
).rename("x")
srs2 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 1
).rename("y1")
srs3 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 2
).rename("y2")
return pd.concat([srs1, srs2, srs3], axis=1)
class Test_accumulate(hut.TestCase):
def test1(self) -> None:
srs = pd.Series(
range(0, 20), index=pd.date_range("2010-01-01", periods=20)
)
actual = csigna.accumulate(srs, num_steps=1)
expected = srs.astype(float)
pd.testing.assert_series_equal(actual, expected)
def test2(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test3(self) -> None:
idx =
|
pd.date_range("2010-01-01", periods=10)
|
pandas.date_range
|
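# A minimal sketch (an assumption, not the library implementation): the expected values in
# Test_accumulate.test2 above are consistent with csigna.accumulate(srs, num_steps=n) behaving
# like an n-step rolling sum, which plain pandas reproduces as follows.
import pandas as pd

idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series(range(10), index=idx, dtype=float)
rolling_sum = srs.rolling(window=2).sum()  # NaN, 1, 3, 5, 7, 9, 11, 13, 15, 17
print(rolling_sum)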
import os
from FinRL.finrl import config
from FinRL.finrl import config_tickers
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
os.makedirs("./" + config.RESULTS_DIR)
from FinRL.finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader
from FinRL.finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split
import pandas as pd
df = YahooDownloader(start_date = '2008-01-01',
end_date = '2022-06-02',
ticker_list = config_tickers.DOW_30_TICKER).fetch_data()
fe = FeatureEngineer(
use_technical_indicator=True,
use_turbulence=False,
user_defined_feature = False)
df = fe.preprocess_data(df)
# add covariance matrix as states
df = df.sort_values(['date', 'tic'], ignore_index=True)
df.index = df.date.factorize()[0]
cov_list = []
return_list = []
# look back is one year
lookback = 252
for i in range(lookback, len(df.index.unique())):
data_lookback = df.loc[i - lookback:i, :]
price_lookback = data_lookback.pivot_table(index='date', columns='tic', values='close')
return_lookback = price_lookback.pct_change().dropna()
return_list.append(return_lookback)
covs = return_lookback.cov().values
cov_list.append(covs)
df_cov = pd.DataFrame({'date': df.date.unique()[lookback:], 'cov_list': cov_list, 'return_list': return_list})
df = df.merge(df_cov, on='date')
df = df.sort_values(['date', 'tic']).reset_index(drop=True)
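# Each date now carries an N x N lookback covariance matrix ('cov_list') and the lookback return
# frame ('return_list'); StockPortfolioEnv below stacks the covariance matrix with the K rows of
# technical indicators to form an (N + K) x N observation.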
train = data_split(df, '2009-04-01','2020-03-31')
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from stable_baselines3.common.vec_env import DummyVecEnv
class StockPortfolioEnv(gym.Env):
"""A portfolio allocation environment for OpenAI gym
Attributes
----------
df: DataFrame
input data
stock_dim : int
number of unique stocks
hmax : int
maximum number of shares to trade
initial_amount : int
start money
transaction_cost_pct: float
transaction cost percentage per trade
reward_scaling: float
scaling factor for reward, good for training
state_space: int
the dimension of input features
action_space: int
equals stock dimension
tech_indicator_list: list
a list of technical indicator names
turbulence_threshold: int
a threshold to control risk aversion
day: int
an increment number to control date
Methods
-------
_sell_stock()
perform sell action based on the sign of the action
_buy_stock()
perform buy action based on the sign of the action
step()
at each step the agent will return actions, then
we will calculate the reward, and return the next observation.
reset()
reset the environment
render()
use render to return other functions
save_asset_memory()
return account value at each time step
save_action_memory()
return actions/positions at each time step
"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
transaction_cost_pct,
reward_scaling,
state_space,
action_space,
tech_indicator_list,
turbulence_threshold=None,
lookback=252,
day=0):
# super(StockEnv, self).__init__()
# money = 10 , scope = 1
self.day = day
self.lookback = lookback
self.df = df
self.stock_dim = stock_dim
self.hmax = hmax
self.initial_amount = initial_amount
self.transaction_cost_pct = transaction_cost_pct
self.reward_scaling = reward_scaling
self.state_space = state_space
self.action_space = action_space
self.tech_indicator_list = tech_indicator_list
# action_space normalization and shape is self.stock_dim
self.action_space = spaces.Box(low=0, high=1, shape=(self.action_space,))
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(self.state_space + len(self.tech_indicator_list), self.state_space))
# load data from a pandas dataframe
self.data = self.df.loc[self.day, :]
self.covs = self.data['cov_list'].values[0]
self.state = np.append(np.array(self.covs),
[self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)
self.terminal = False
self.turbulence_threshold = turbulence_threshold
        # initialize state: initial portfolio return + individual stock return + individual weights
self.portfolio_value = self.initial_amount
# memorize portfolio value each step
self.asset_memory = [self.initial_amount]
# memorize portfolio return each step
self.portfolio_return_memory = [0]
self.actions_memory = [[1 / self.stock_dim] * self.stock_dim]
self.date_memory = [self.data.date.unique()[0]]
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique()) - 1
if self.terminal:
df = pd.DataFrame(self.portfolio_return_memory)
df.columns = ['daily_return']
plt.plot(df.daily_return.cumsum(), 'r')
plt.savefig('results/cumulative_reward.png')
plt.close()
plt.plot(self.portfolio_return_memory, 'r')
plt.savefig('results/rewards.png')
plt.close()
print("=================================")
print("begin_total_asset:{}".format(self.asset_memory[0]))
print("end_total_asset:{}".format(self.portfolio_value))
df_daily_return = pd.DataFrame(self.portfolio_return_memory)
df_daily_return.columns = ['daily_return']
if df_daily_return['daily_return'].std() != 0:
sharpe = (252 ** 0.5) * df_daily_return['daily_return'].mean() / \
df_daily_return['daily_return'].std()
print("Sharpe: ", sharpe)
print("=================================")
return self.state, self.reward, self.terminal, {}
else:
weights = self.softmax_normalization(actions)
self.actions_memory.append(weights)
last_day_memory = self.data
# load next state
self.day += 1
self.data = self.df.loc[self.day, :]
self.covs = self.data['cov_list'].values[0]
self.state = np.append(np.array(self.covs),
[self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)
portfolio_return = sum(((self.data.close.values / last_day_memory.close.values) - 1) * weights)
log_portfolio_return = np.log(sum((self.data.close.values / last_day_memory.close.values) * weights))
# update portfolio value
new_portfolio_value = self.portfolio_value * (1 + portfolio_return)
self.portfolio_value = new_portfolio_value
# save into memory
self.portfolio_return_memory.append(portfolio_return)
self.date_memory.append(self.data.date.unique()[0])
self.asset_memory.append(new_portfolio_value)
            # the reward is the new portfolio value or end portfolio value
self.reward = new_portfolio_value
return self.state, self.reward, self.terminal, {}
def reset(self):
self.asset_memory = [self.initial_amount]
self.day = 0
self.data = self.df.loc[self.day, :]
# load states
self.covs = self.data['cov_list'].values[0]
self.state = np.append(np.array(self.covs),
[self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)
self.portfolio_value = self.initial_amount
# self.cost = 0
# self.trades = 0
self.terminal = False
self.portfolio_return_memory = [0]
self.actions_memory = [[1 / self.stock_dim] * self.stock_dim]
self.date_memory = [self.data.date.unique()[0]]
return self.state
def render(self, mode='human'):
return self.state
def softmax_normalization(self, actions):
numerator = np.exp(actions)
denominator = np.sum(np.exp(actions))
softmax_output = numerator / denominator
return softmax_output
def save_asset_memory(self):
date_list = self.date_memory
portfolio_return = self.portfolio_return_memory
# print(len(date_list))
# print(len(asset_list))
df_account_value = pd.DataFrame({'date': date_list, 'daily_return': portfolio_return})
return df_account_value
def save_action_memory(self):
# date and close price length must match actions length
date_list = self.date_memory
df_date = pd.DataFrame(date_list)
df_date.columns = ['date']
action_list = self.actions_memory
df_actions = pd.DataFrame(action_list)
df_actions.columns = self.data.tic.values
df_actions.index = df_date.date
# df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
return df_actions
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_sb_env(self):
e = DummyVecEnv([lambda: self])
obs = e.reset()
return e, obs
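# A small, self-contained sketch (illustrative only) of the two core updates inside
# StockPortfolioEnv.step(): raw actions become portfolio weights via a softmax, and the
# portfolio value is propagated with the weighted sum of single-period price returns.
example_actions = np.array([0.2, -0.5, 1.0])                               # raw agent output for 3 assets
example_weights = np.exp(example_actions) / np.exp(example_actions).sum()  # softmax_normalization()
prev_close = np.array([100.0, 50.0, 20.0])
next_close = np.array([101.0, 49.0, 21.0])
example_return = np.sum((next_close / prev_close - 1) * example_weights)
print(example_weights, example_return, 1000000 * (1 + example_return))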
stock_dimension = len(train.tic.unique())
state_space = stock_dimension
print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")
tech_indicator_list = ['macd', 'rsi_30', 'cci_30', 'dx_30']
feature_dimension = len(tech_indicator_list)
print(f"Feature Dimension: {feature_dimension}")
env_kwargs = {
"hmax": 100,
"initial_amount": 1000000,
"transaction_cost_pct": 0,
"state_space": state_space,
"stock_dim": stock_dimension,
"tech_indicator_list": tech_indicator_list,
"action_space": stock_dimension,
"reward_scaling": 1e-1
}
e_train_gym = StockPortfolioEnv(df=train, **env_kwargs)
env_train, _ = e_train_gym.get_sb_env()
print(type(env_train))
from FinRL.finrl.agents.stablebaselines3.models import DRLAgent
agent = DRLAgent(env = env_train)
A2C_PARAMS = {"n_steps": 10, "ent_coef": 0.005, "learning_rate": 0.0004}
model_a2c = agent.get_model(model_name="a2c",model_kwargs = A2C_PARAMS)
trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c',
total_timesteps=40000)
agent = DRLAgent(env = env_train)
PPO_PARAMS = {
"n_steps": 2048,
"ent_coef": 0.005,
"learning_rate": 0.001,
"batch_size": 128,
}
model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS)
trained_ppo = agent.train_model(model=model_ppo,
tb_log_name='ppo',
total_timesteps=40000)
trade = data_split(df,'2020-04-01', '2022-05-31')
e_trade_gym = StockPortfolioEnv(df = trade, **env_kwargs)
import torch
import plotly.express as px
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
import pandas as pd
from pypfopt import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import objective_functions
unique_tic = trade.tic.unique()
unique_trade_date = trade.date.unique()
import pyfolio
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline,convert_daily_return_to_pyfolio_ts
baseline_df = get_baseline(
ticker="^DJI",
start = '2020-07-01',
end = '2021-09-01')
baseline_df_stats = backtest_stats(baseline_df, value_col_name = 'close')
baseline_returns = get_daily_return(baseline_df, value_col_name="close")
dji_cumpod =(baseline_returns+1).cumprod()-1
from pyfolio import timeseries
df_daily_return_a2c, df_actions_a2c = DRLAgent.DRL_prediction(model=trained_a2c,
environment = e_trade_gym)
df_daily_return_ppo, df_actions_ppo = DRLAgent.DRL_prediction(model=trained_ppo,
environment = e_trade_gym)
time_ind = pd.Series(df_daily_return_a2c.date)
a2c_cumpod =(df_daily_return_a2c.daily_return+1).cumprod()-1
ppo_cumpod =(df_daily_return_ppo.daily_return+1).cumprod()-1
DRL_strat_a2c = convert_daily_return_to_pyfolio_ts(df_daily_return_a2c)
DRL_strat_ppo = convert_daily_return_to_pyfolio_ts(df_daily_return_ppo)
perf_func = timeseries.perf_stats
perf_stats_all_a2c = perf_func( returns=DRL_strat_a2c,
factor_returns=DRL_strat_a2c,
positions=None, transactions=None, turnover_denom="AGB")
perf_stats_all_ppo = perf_func( returns=DRL_strat_ppo,
factor_returns=DRL_strat_ppo,
positions=None, transactions=None, turnover_denom="AGB")
def extract_weights(drl_actions_list):
a2c_weight_df = {'date':[], 'weights':[]}
for i in range(len(drl_actions_list)):
date = drl_actions_list.index[i]
tic_list = list(drl_actions_list.columns)
weights_list = drl_actions_list.reset_index()[list(drl_actions_list.columns)].iloc[i].values
weight_dict = {'tic':[], 'weight':[]}
for j in range(len(tic_list)):
weight_dict['tic'] += [tic_list[j]]
weight_dict['weight'] += [weights_list[j]]
a2c_weight_df['date'] += [date]
a2c_weight_df['weights'] += [pd.DataFrame(weight_dict)]
a2c_weights = pd.DataFrame(a2c_weight_df)
return a2c_weights
a2c_weights = extract_weights(df_actions_a2c)
ppo_weights = extract_weights(df_actions_ppo)
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
def prepare_data(trainData):
train_date = sorted(set(trainData.date.values))
X = []
for i in range(0, len(train_date) - 1):
d = train_date[i]
d_next = train_date[i+1]
        y = trainData.loc[trainData['date'] == d_next].return_list.iloc[0].loc[d_next].reset_index()
y.columns = ['tic', 'return']
        x = trainData.loc[trainData['date'] == d][['tic','macd','rsi_30','cci_30','dx_30']]
train_piece = pd.merge(x, y, on = 'tic')
train_piece['date'] = [d] * len(train_piece)
X += [train_piece]
trainDataML = pd.concat(X)
X = trainDataML[tech_indicator_list].values
Y = trainDataML[['return']].values
return X, Y
train_X, train_Y = prepare_data(train)
rf_model = RandomForestRegressor(max_depth = 35, min_samples_split = 10, random_state = 0).fit(train_X, train_Y.reshape(-1))
dt_model = DecisionTreeRegressor(random_state = 0, max_depth=35, min_samples_split = 10 ).fit(train_X, train_Y.reshape(-1))
svm_model = SVR(epsilon=0.14).fit(train_X, train_Y.reshape(-1))
lr_model = LinearRegression().fit(train_X, train_Y)
def output_predict(model, reference_model=False):
meta_coefficient = {"date": [], "weights": []}
portfolio = pd.DataFrame(index=range(1), columns=unique_trade_date)
initial_capital = 1000000
portfolio.loc[0, unique_trade_date[0]] = initial_capital
for i in range(len(unique_trade_date) - 1):
current_date = unique_trade_date[i]
next_date = unique_trade_date[i + 1]
df_current = df[df.date == current_date].reset_index(drop=True)
tics = df_current['tic'].values
features = df_current[tech_indicator_list].values
df_next = df[df.date == next_date].reset_index(drop=True)
if not reference_model:
predicted_y = model.predict(features)
mu = predicted_y
Sigma = risk_models.sample_cov(df_current.return_list[0], returns_data=True)
else:
mu = df_next.return_list[0].loc[next_date].values
Sigma = risk_models.sample_cov(df_next.return_list[0], returns_data=True)
predicted_y_df = pd.DataFrame({"tic": tics.reshape(-1, ), "predicted_y": mu.reshape(-1, )})
min_weight, max_weight = 0, 1
ef = EfficientFrontier(mu, Sigma)
weights = ef.nonconvex_objective(
objective_functions.sharpe_ratio,
objective_args=(ef.expected_returns, ef.cov_matrix),
weights_sum_to_one=True,
constraints=[
{"type": "ineq", "fun": lambda w: w - min_weight}, # greater than min_weight
{"type": "ineq", "fun": lambda w: max_weight - w}, # less than max_weight
],
)
weight_df = {"tic": [], "weight": []}
meta_coefficient["date"] += [current_date]
# it = 0
for item in weights:
weight_df['tic'] += [item]
weight_df['weight'] += [weights[item]]
weight_df = pd.DataFrame(weight_df).merge(predicted_y_df, on=['tic'])
meta_coefficient["weights"] += [weight_df]
cap = portfolio.iloc[0, i]
# current cash invested for each stock
current_cash = [element * cap for element in list(weights.values())]
# current held shares
current_shares = list(np.array(current_cash) / np.array(df_current.close))
# next time period price
next_price = np.array(df_next.close)
portfolio.iloc[0, i + 1] = np.dot(current_shares, next_price)
portfolio = portfolio.T
portfolio.columns = ['account_value']
portfolio = portfolio.reset_index()
portfolio.columns = ['date', 'account_value']
stats = backtest_stats(portfolio, value_col_name='account_value')
portfolio_cumprod = (portfolio.account_value.pct_change() + 1).cumprod() - 1
return portfolio, stats, portfolio_cumprod, pd.DataFrame(meta_coefficient)
lr_portfolio, lr_stats, lr_cumprod, lr_weights = output_predict(lr_model)
dt_portfolio, dt_stats, dt_cumprod, dt_weights = output_predict(dt_model)
svm_portfolio, svm_stats, svm_cumprod, svm_weights = output_predict(svm_model)
rf_portfolio, rf_stats, rf_cumprod, rf_weights = output_predict(rf_model)
reference_portfolio, reference_stats, reference_cumprod, reference_weights = output_predict(None, True)
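# For context, a minimal pypfopt sketch (illustrative, synthetic prices; not part of the pipeline
# above): output_predict() feeds predicted returns and a sample covariance into EfficientFrontier
# via a custom non-convex Sharpe objective, whereas the standard convex route looks like this.
example_rng = np.random.default_rng(0)
example_prices = pd.DataFrame(
    100 * np.exp(np.cumsum(example_rng.normal(0.001, 0.01, size=(250, 3)), axis=0)),
    columns=["AAA", "BBB", "CCC"],
    index=pd.date_range("2020-01-01", periods=250),
)
example_mu = expected_returns.mean_historical_return(example_prices)
example_cov = risk_models.sample_cov(example_prices)
example_ef = EfficientFrontier(example_mu, example_cov)
example_ef.max_sharpe()
print(example_ef.clean_weights())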
def calculate_gradient(model, interpolated_input, actions, feature_idx, stock_idx, h = 1e-1):
    forward_input = np.copy(interpolated_input)  # copy so that perturbing one entry does not also modify the baseline input
forward_input[feature_idx + stock_dimension][stock_idx] += h
forward_Q = model.policy.evaluate_actions(torch.FloatTensor(forward_input).reshape(-1,stock_dimension*(stock_dimension + feature_dimension)), torch.FloatTensor(actions).reshape(-1,stock_dimension))
interpolated_Q = model.policy.evaluate_actions(torch.FloatTensor(interpolated_input).reshape(-1,stock_dimension*(stock_dimension + feature_dimension)), torch.FloatTensor(actions).reshape(-1,stock_dimension))
forward_Q = forward_Q[0].detach().cpu().numpy()[0]
interpolated_Q = interpolated_Q[0].detach().cpu().numpy()[0]
return (forward_Q - interpolated_Q) / h
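# The loop below builds an integrated-gradients style saliency score: calculate_gradient() is a
# one-sided finite difference, dQ/dx_i ~ (Q(x + h * e_i) - Q(x)) / h, taken on the policy's value
# estimate, and the alpha loop averages that gradient along a straight path from a baseline (the
# feature column zeroed out) to the actual input, then scales it by (x_i - baseline_i).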
import copy
meta_Q = {"date": [], "feature": [], "Saliency Map": [], "algo": []}
for algo in {"A2C", "PPO"}:
if algo == "A2C":
prec_step = 1e-2
else:
prec_step = 1e-1
model = eval("trained_" + algo.lower())
df_actions = eval("df_actions_" + algo.lower())
for i in range(len(unique_trade_date) - 1):
date = unique_trade_date[i]
covs = trade[trade['date'] == date].cov_list.iloc[0]
features = trade[trade['date'] == date][tech_indicator_list].values # N x K
actions = df_actions.loc[date].values
for feature_idx in range(len(tech_indicator_list)):
int_grad_per_feature = 0
for stock_idx in range(features.shape[0]): # N
int_grad_per_stock = 0
avg_interpolated_grad = 0
for alpha in range(1, 51):
scale = 1 / 50
baseline_features = copy.deepcopy(features)
baseline_noise = np.random.normal(0, 1, stock_dimension)
baseline_features[:, feature_idx] = [0] * stock_dimension
interpolated_features = baseline_features + scale * alpha * (features - baseline_features) # N x K
interpolated_input = np.append(covs, interpolated_features.T, axis=0)
interpolated_gradient = \
calculate_gradient(model, interpolated_input, actions, feature_idx, stock_idx, h=prec_step)[0]
avg_interpolated_grad += interpolated_gradient * scale
int_grad_per_stock = (features[stock_idx][feature_idx] - 0) * avg_interpolated_grad
int_grad_per_feature += int_grad_per_stock
meta_Q['date'] += [date]
meta_Q['algo'] += [algo]
meta_Q['feature'] += [tech_indicator_list[feature_idx]]
meta_Q['Saliency Map'] += [int_grad_per_feature]
meta_Q = pd.DataFrame(meta_Q)
import statsmodels.api as sm
meta_score_coef = {"date":[], "coef":[], "algo":[]}
for algo in ["LR", "RF", "Reference Model", "SVM", "DT", "A2C", "PPO"]:
if algo == "LR":
weights = lr_weights
elif algo == "RF":
weights = rf_weights
elif algo == "DT":
weights = dt_weights
elif algo == "SVM":
weights = svm_weights
elif algo == "A2C":
weights = a2c_weights
elif algo == "PPO":
weights = ppo_weights
else:
weights = reference_weights
for i in range(len(unique_trade_date) - 1):
date = unique_trade_date[i]
next_date = unique_trade_date[i+1]
df_temp = df[df.date==date].reset_index(drop=True)
df_temp_next = df[df.date==next_date].reset_index(drop=True)
weight_piece = weights[weights.date == date].iloc[0]['weights']
piece_return = pd.DataFrame(df_temp_next.return_list.iloc[0].loc[next_date]).reset_index()
piece_return.columns = ['tic', 'return']
X = df_temp[['macd','rsi_30', 'cci_30', 'dx_30', 'tic']]
X_next = df_temp_next[['macd','rsi_30', 'cci_30', 'dx_30', 'tic']]
piece = weight_piece.merge(X, on = 'tic').merge(piece_return, on = 'tic')
piece['Y'] = piece['return'] * piece['weight']
X = piece[['macd','rsi_30', 'cci_30', 'dx_30']]
X = sm.add_constant(X)
Y = piece[['Y']]
model = sm.OLS(Y,X)
results = model.fit()
meta_score_coef["coef"] += [(X * results.params).sum(axis = 0)]
meta_score_coef["date"] += [date]
meta_score_coef["algo"] += [algo]
meta_score_coef =
|
pd.DataFrame(meta_score_coef)
|
pandas.DataFrame
|
import numpy as np
import os.path
import pandas as pd
import sys
import math
# find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
# print(sys.path)
# print(os.path)
class LeslieProbitInputs(ModelSharedInputs):
"""
Input class for LeslieProbit.
"""
def __init__(self):
"""Class representing the inputs for LeslieProbit"""
super(LeslieProbitInputs, self).__init__()
# self.a_n = pd.Series([], dtype="object")
# self.c_n = pd.Series([], dtype="object")
self.grass_type = pd.Series([], dtype="object")
self.percent_active_ingredient = pd.Series([], dtype="float")
self.foliar_half_life = pd.Series([], dtype="float")
self.sol = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
self.number_applications = pd.Series([], dtype="float")
self.application_rates = pd.Series([], dtype="float")
self.application_days = pd.Series([], dtype="float")
self.b = pd.Series([], dtype="float")
self.test_species = pd.Series([], dtype="object")
self.ld50_test = pd.Series([], dtype="float")
# self.bw_tested = pd.Series([], dtype="float")
# self.ass_species = pd.Series([], dtype="object")
# self.bw_ass = pd.Series([], dtype="float")
self.mineau_scaling_factor = pd.Series([], dtype="float")
self.probit_gamma = pd.Series([], dtype="float")
self.init_pop_size = pd.Series([], dtype="float")
self.stages = pd.Series([], dtype="float")
self.l_m =
|
pd.Series([], dtype="float")
|
pandas.Series
|
import pandas as pd
import numpy as np
import os
from sklearn import metrics
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from itertools import combinations
def load_data(file_name):
df =
|
pd.read_csv(file_name)
|
pandas.read_csv
|
import pickle
import pandas as pd
import numpy as np
crnn2_result = pickle.load(open('../../CRNN2/crnn_results/crnn_results_summary.p', 'rb'))
crnn4_result = pickle.load(open('../../CRNN4/crnn_results/crnn_results_summary.p', 'rb'))
crnn6_result = pickle.load(open('../../CRNN6/crnn_results/crnn_results_summary.p', 'rb'))
crnn8_result = pickle.load(open('../../CRNN8/crnn_results/crnn_results_summary.p', 'rb'))
crnn10_result = pickle.load(open('../../CRNN10/crnn_results/crnn_results_summary.p', 'rb'))
crnn40_result = pickle.load(open('../../CRNN40/crnn_results/crnn_results_summary.p', 'rb'))
crnn100_result = pickle.load(open('../../CRNN100/crnn_results/crnn_results_summary.p', 'rb'))
crnn400_result = pickle.load(open('../../CRNN400/crnn_results/crnn_results_summary.p', 'rb'))
crnn1200_result = pickle.load(open('../../CRNN1200/crnn_results/crnn_results_summary.p', 'rb'))
vgg_result = pickle.load(open('../../VGG/results/vgg_results_summary.p', 'rb'))
lenet_result = pickle.load(open('../../LENET/results/lenet_results_summary.p', 'rb'))
svm_result = pickle.load(open('../../SVM/results/svm_results_summary.p', 'rb'))
result_summary = {'crnn2': pd.DataFrame(crnn2_result), 'crnn4': pd.DataFrame(crnn4_result), 'crnn6': pd.DataFrame(crnn6_result),
'crnn8': pd.DataFrame(crnn8_result), 'crnn10': pd.DataFrame(crnn10_result), 'crnn40': pd.DataFrame(crnn40_result),
'crnn100': pd.DataFrame(crnn100_result), 'crnn400': pd.DataFrame(crnn400_result), 'crnn1200': pd.DataFrame(crnn1200_result),
'vgg': pd.DataFrame(vgg_result), 'lenet': pd.DataFrame(lenet_result), 'svm': pd.DataFrame(svm_result)}
result_summary = pd.concat(result_summary)
result_summary.to_csv('../result/result_summary.csv', sep = ',')
crnn_pitch_shift = pickle.load(open('../../CRNN400/crnn_results/pitch_shift_results.p', 'rb'))
crnn_time_stretch = pickle.load(open('../../CRNN400/crnn_results/time_stretch_results.p', 'rb'))
crnn_crop = pickle.load(open('../../CRNN400/crnn_results/crop_results.p', 'rb'))
lenet_pitch_shift = pickle.load(open('../../LENET/results/pitch_shift_results.p', 'rb'))
lenet_time_stretch = pickle.load(open('../../LENET/results/time_stretch_results.p', 'rb'))
lenet_crop = pickle.load(open('../../LENET/results/crop_results.p', 'rb'))
svm_pitch_shift = pickle.load(open('../../SVM/results/pitch_shift_results.p', 'rb'))
svm_time_stretch = pickle.load(open('../../SVM/results/time_stretch_results.p', 'rb'))
svm_crop = pickle.load(open('../../SVM/results/crop_results.p', 'rb'))
simulation_summary = {'crnn_picth_shift': pd.DataFrame(crnn_pitch_shift), 'crnn_time_stretch': pd.DataFrame(crnn_time_stretch),
'crnn_crop': pd.DataFrame(crnn_crop), 'lenet_picth_shift': pd.DataFrame(lenet_pitch_shift),
'lenet_time_stretch': pd.DataFrame(lenet_time_stretch), 'lenet_crop': pd.DataFrame(lenet_crop),
'svm_pitch_shift': pd.DataFrame(svm_pitch_shift), 'svm_time_stretch': pd.DataFrame(svm_time_stretch),
'svm_crop': pd.DataFrame(svm_crop)}
simulation_summary = pd.concat(simulation_summary)
simulation_summary.to_csv('../result/simulation_summary.csv', sep = ',')
###############################################
crnn_result = pickle.load(open('../../CRNN400/crnn_results/crnn_results_summary.p', 'rb'))
lenet_result = pickle.load(open('../../LENET/results/lenet_results_summary.p', 'rb'))
svm_result = pickle.load(open('../../SVM/results/svm_results_summary.p', 'rb'))
result_event = {'crnn': crnn_result['threshold_result']['label event'], 'lenet_hmm_bino': lenet_result['hmm_bino_threshold_result']['label event'],
'lenet_hmm_gmm': lenet_result['hmm_gmm_result']['label event'], 'lenet': lenet_result['threshold_result']['label event'],
'svm_hmm_bino': svm_result['hmm_bino_result']['label event'], 'svm': svm_result['svm_result']['label event'],
'crnn_hmm': crnn_result['hmm_bino_threshold_result']['label event']}#add a crnn hmm result
result_event = pd.DataFrame(result_event)
result_event.to_csv('../result/result_event2.csv', sep = ',', index= False)
#####################################################
crnn_result_proportion_075 = pickle.load(open('../../CRNN400_proportion_data/crnn_results/proportion_0/crnn_results_summary.p', 'rb'))
crnn_result_proportion_050 = pickle.load(open('../../CRNN400_proportion_data/crnn_results/proportion_1/crnn_results_summary.p', 'rb'))
crnn_result_proportion_025 = pickle.load(open('../../CRNN400_proportion_data/crnn_results/proportion_2/crnn_results_summary.p', 'rb'))
lenet_result_proportion_075 = pickle.load(open('../../LENET_proportion_data/results/proportion_0/lenet_results_summary.p', 'rb'))
lenet_result_proportion_050 = pickle.load(open('../../LENET_proportion_data/results/proportion_1/lenet_results_summary.p', 'rb'))
lenet_result_proportion_025 = pickle.load(open('../../LENET_proportion_data/results/proportion_2/lenet_results_summary.p', 'rb'))
svm_result_proportion_075 = pickle.load(open('../../SVM_proportion_data/results/proportion_0/svm_results_summary.p', 'rb'))
svm_result_proportion_050 = pickle.load(open('../../SVM_proportion_data/results/proportion_1/svm_results_summary.p', 'rb'))
svm_result_proportion_025 = pickle.load(open('../../SVM_proportion_data/results/proportion_2/svm_results_summary.p', 'rb'))
proportion_data_summary = {'crnn_proportion_075': pd.DataFrame(crnn_result_proportion_075),
'crnn_proportion_050': pd.DataFrame(crnn_result_proportion_050),
'crnn_proportion_025': pd.DataFrame(crnn_result_proportion_025),
'lenet_proportion_075': pd.DataFrame(lenet_result_proportion_075),
'lenet_proportion_050': pd.DataFrame(lenet_result_proportion_050),
'lenet_proportion_025': pd.DataFrame(lenet_result_proportion_025),
'svm_proportion_075': pd.DataFrame(svm_result_proportion_075),
'svm_proportion_050': pd.DataFrame(svm_result_proportion_050),
'svm_proportion_025':
|
pd.DataFrame(svm_result_proportion_025)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
from Regression import linear_regression
help(linear_regression)
lr = linear_regression()
class Robustness:
def stars(self, p):
if p <= 0.001:
return '***'
elif p <= 0.05:
return '**'
elif p <= 0.1:
return '*'
else:
return ''
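# For example (values chosen only to illustrate the thresholds above):
#   Robustness().stars(0.0005) -> '***', .stars(0.03) -> '**', .stars(0.08) -> '*', .stars(0.2) -> ''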
def double_sort(self, X, y, group_names, ngroup=5, take_in_reg = False):
"""
X: contains the group_names columns
take_in_reg: whether to include the group_names in the regression. Default False -> treated like a traditional Fama model alpha
group_names: list of two strings; the first name is shown on the index, the second on the columns
sort the regression residuals by the two group_names, then compare the n-th (biggest) vs the 1st (smallest) group with a t-test
"""
X_cols = list(X.columns)
if not take_in_reg:
for group in group_names:
X_cols.remove(group)
lr.ols_fit(X[X_cols], y, if_print=False)
resid = lr.get_residuals()
XX = pd.concat([X[group_names],
|
pd.Series(resid, name='residual', index=X.index)
|
pandas.Series
|
#!/usr/bin/env python3
"""<NAME> (2020). Parses downloaded data files.
"""
import pandas as pd
import numpy as np
def setup_datasets():
"""Parses downloaded AGES datasets.
Returns:
collated (dict): dictionary of parsed .csv files.
"""
df_bundesland = pd.read_csv(
"../data/austria/CovidFaelle_Timeline.csv",
sep=";",
index_col=0,
parse_dates=[0],
infer_datetime_format=True,
decimal=",",
)
df_districts = pd.read_csv(
"../data/austria/CovidFaelle_Timeline_GKZ.csv",
sep=";",
index_col=0,
parse_dates=[0],
infer_datetime_format=True,
decimal=",",
)
df_fz = pd.read_csv(
"../data/austria/CovidFallzahlen.csv",
sep=";",
index_col=0,
parse_dates=[0],
dayfirst=True,
decimal=",",
)
# ICU and hospitalisation
df_fz["PercentHospOcc"] = 100 * df_fz["FZHosp"].div(
df_fz["FZHosp"] + df_fz["FZHospFree"]
)
df_fz["PercentICUOcc"] = 100 * df_fz["FZICU"].div(
df_fz["FZICU"] + df_fz["FZICUFree"]
)
# split datasets
df_oeste = df_bundesland[df_bundesland["BundeslandID"] == 10]
df_tirol = df_bundesland[df_bundesland["Bundesland"] == "Tirol"]
df_wien = df_bundesland[df_bundesland["Bundesland"] == "Wien"]
df_innsbruck = df_districts[df_districts["Bezirk"] == "Innsbruck-Land"]
df_innsbruck_city = df_districts[df_districts["Bezirk"] == "Innsbruck-Stadt"]
df_fz_oeste = df_fz[df_fz["BundeslandID"] == 10]
df_fz_tirol = df_fz[df_fz["Bundesland"] == "Tirol"]
df_fz_tirol[df_fz_tirol.index == "2021-01-07"]
collated = {
"austria": df_oeste,
"all_states": df_bundesland,
"all_districts": df_districts,
"casualties": df_fz,
"tirol": df_tirol,
"wien": df_wien,
"innsbruck_land": df_innsbruck,
"innsbruck_city": df_innsbruck_city,
"casualties_austria": df_fz_oeste,
"casualties_tirol": df_fz_tirol,
}
return collated
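# A minimal usage sketch (assumes the AGES csv files exist at the relative paths used above):
#   data = setup_datasets()
#   data["tirol"].head()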
def setup_vaccination_data(path="./data/europe/data.csv"):
"""Parses downloaded ECDC vaccination datasets, adjusts datetime format.
Args:
path (str): path to csv file
Returns:
df (pandas.DataFrame): formatted ECDC dataset.
"""
df = pd.read_csv(path, sep=",", index_col=0,)
df.index =
|
pd.to_datetime(df.index + "-1", format="%G-W%V-%u")
|
pandas.to_datetime
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 10:40:27 2019
This file handles the communication with the ICOS Sparql Endpoint.
Instances of RunSparql() query the ICOS Sparql endpoint,
and returns a formatted response.
"""
__author__ = ["<NAME>"]
__credits__ = "ICOS Carbon Portal"
__license__ = "GPL-3.0"
__version__ = "0.1.0"
__maintainer__ = "ICOS Carbon Portal, elaborated products team"
__email__ = ['<EMAIL>', '<EMAIL>']
__status__ = "rc1"
__date__ = "2019-08-09"
import requests
import pandas as pd
class RunSparql():
"""
Class to send a sparql query to the icos endpoint and get
formatted output back.
:param sparql_query: string, a valid sparql query
:param output_format: format of the returned object ['json', 'csv', 'array', 'dict', 'pandas', 'html']
:return: False if the query is not successful, otherwise the result in the requested output format
"""
def __init__(self, sparql_query='', output_format='txt'):
self.format = output_format
self.query = sparql_query
self.__result = False
@property
def format(self):
""" contains the output format for the results from the ICOS sqparql endpoint"""
return self.__format
@format.setter
def format(self, fmt):
"""
You can set the format of the results returned after running the query.
Allowed formats are:
- 'json' (default), string
- 'csv' a string object with comma separated values, where the first row contains the column names
Python specific formats:
- 'dict' a python dict representing the json object
- 'pandas' a pandas table with column names
- 'html'
- 'array' returns TWO python arrays, the first with the headers, the second with the values.
"""
allowed = ['json', 'csv', 'dict', 'pandas', 'array', 'html']
try:
fmt = str(fmt)
except TypeError:
return 0
if fmt.lower() in allowed:
self.__format = fmt.lower()
else:
self.__format = 'json'
@property
def query(self):
""" Import a sparql query. No validation is performed. """
return self.__query
@query.setter
def query(self, query):
try:
query = str(query)
except TypeError:
return 0
self.__query = str(query)
# ------------------------------------------------------------------------
def data(self):
return self.__result
# ------------------------------------------------------------------------
def run(self):
"""
This function queries the ICOS sparql endpoint with the stored query string.
By default the returned object is the raw "json" object.
"""
# check if query is set
if not len(self.__query):
print('no query found')
return
url = 'https://meta.icos-cp.eu/sparql'
r = requests.get(url, params={'query': self.__query})
if not r.ok:
print(r.ok, r.reason)
return r.ok, r.reason
# now check what format we should return.
# either set the __result directly, or call a transform
# function which sets the __result.
if self.format == 'json':
self.__result = r.text
if self.__format == 'dict':
self.__result = r.json()
if self.__format == 'csv':
self.__result = self.__to_csv(r.json())
if self.__format == 'pandas':
self.__result = self.__to_pandas(r.json())
if self.__format == 'array':
self.__result = self.__to_array(r.json())
if self.__format == 'html':
self.__result = self.__to_html(r.json())
return self.__result
# ------------------------------------------------------------------------
def __to_array(self, data):
# convert the the result into two arrays
# colName, colData
colname = data['head']['vars']
coldata = []
for row in data['results']['bindings']:
item = []
for c in colname:
item.append(row.get(c, {}).get('value'))
coldata.append(item)
return colname, coldata
# ------------------------------------------------------------------------
def __to_csv(self, data):
colname, coldata = self.__to_array(data)
# add the header line
csvstring = ','.join(colname) + '\n'
for row in coldata:
csvstring += ','.join(row) + '\n'
return csvstring
# ------------------------------------------------------------------------
def __to_pandas(self, data):
colname, coldata = self.__to_array(data)
return
|
pd.DataFrame(coldata, columns=colname)
|
pandas.DataFrame
|
# Dec 21: modified to optionally output the original (pre-imputation) counts
##
#---------------------------------------------------------------------
# SERVER only: input all files (.bam and .fa), output MeH matrix in .csv
# Oct 19, 2021 ML after imputation test
# github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import os
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message, end='')
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
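# A minimal sketch of how these logging helpers fit together (the file name is only an example):
#   open_log('MeH_run.log')
#   logm('started screening')
#   close_log()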
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=3
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
#return temp.sum()>=2**(w-2) and tempw1.sum()>0
return temp.sum()>=2 and tempw1.sum()>0
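# For example, with complete=False a window of w=4 sites qualifies for imputation when it has
# at least 2 reads covering all 4 sites and at least 1 read missing exactly one site; with
# complete=True (heterogeneity estimation) it needs at least 3 fully observed reads.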
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
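# Illustration of the imputation rule above (hypothetical 3-site window): a read like [1, nan, 1]
# gets its missing site filled with the column's unanimous observed value if all reads agree there,
# otherwise copied from a randomly chosen complete read that matches it at the other w-1 sites,
# otherwise sampled from the observed values in that column.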
def outwindow(pat,patori,pos,chrom,w,M,UM,Mo,UMo,mC=4,strand='f',optional=False):
# get complete reads
tempori=np.isnan(patori).sum(axis=1)==0
patori=patori[np.where(tempori)[0],:]
countori=np.zeros((2**w,1))
temp=np.isnan(pat).sum(axis=1)==0
pat=pat[np.where(temp)[0],:]
count=np.zeros((2**w,1))
# m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if optional:
patori = Counter([str(i[0])+str(i[1]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in patori.astype(int).tolist()])
countori = np.array([float(patori[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in patori.astype(int).tolist()])
countori = np.array([float(patori[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
countori=countori.reshape(2**w)
countori=np.concatenate((countori[[0]],countori))
if w==3 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==3 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==4 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==4 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==5 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==5 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'p17o':countori[17],'p18o':countori[18],'p19o':countori[19],'p20o':countori[20],\
'p21o':countori[21],'p22o':countori[22],'p23o':countori[23],'p24o':countori[24],'p25o':countori[25],\
'p26o':countori[26],'p27o':countori[27],'p28o':countori[28],'p29o':countori[29],'p30o':countori[30],\
'p31o':countori[31],'p32o':countori[32],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==6 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'M':M,'UM':UM,\
'strand':strand}, index=[0])
if w==6 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'p01o':countori[1],'p02o':countori[2],\
'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'p17o':countori[17],'p18o':countori[18],'p19o':countori[19],'p20o':countori[20],\
'p21o':countori[21],'p22o':countori[22],'p23o':countori[23],'p24o':countori[24],'p25o':countori[25],\
'p26o':countori[26],'p27o':countori[27],'p28o':countori[28],'p29o':countori[29],'p30o':countori[30],\
'p31o':countori[31],'p32o':countori[32],'p33o':countori[33],'p34o':countori[34],\
'p35o':countori[35],'p36o':countori[36],'p37o':countori[37],'p38o':countori[38],'p39o':countori[39],'p40o':countori[40],\
'p41o':countori[41],'p42o':countori[42],'p43o':countori[43],'p44o':countori[44],'p45o':countori[45],\
'p46o':countori[46],'p47o':countori[47],'p48o':countori[48],'p49o':countori[49],'p50o':countori[50],\
'p51o':countori[51],'p52o':countori[52],'p53o':countori[53],'p54o':countori[54],'p55o':countori[55],\
'p56o':countori[56],'p57o':countori[57],'p58o':countori[58],'p59o':countori[59],'p60o':countori[60],\
'p61o':countori[61],'p62o':countori[62],'p63o':countori[63],'p64o':countori[64],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,\
'strand':strand}, index=[0])
return opt
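# A minimal sketch of a call for w=3 (all values hypothetical; pat/patori are 0/1 read-by-position
# matrices with w columns, after and before imputation respectively):
#   row = outwindow(pat, patori, pos=1001, chrom='chr1', w=3, M=5, UM=2, Mo=4, UMo=2,
#                   strand='f', optional=False)
#   # 'row' is a one-row DataFrame with the 2**w pattern counts p01..p08 plus M, UM and strand.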
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def CGgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata'):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if w==3 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','M','UM','strand'])
if w==4 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','M','UM','strand'])
if w==5 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'M','UM','strand'])
if w==6 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','M','UM','strand'])
if w==7 and not optional:
ResultPW = pd.DataFrame(columns=\
['chrom','pos','M','UM','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'p65','p66','p67','p68','p69','p70','p71','p72','p73','p74','p75','p76','p77','p78','p79','p80','p81','p82','p83','p84','p85','p86'\
,'p87','p88','p89','p90','p91','p92','p93','p94','p95','p96','p97','p98','p99','p100','p101','p102','p103','p104'\
,'p105','p106','p107','p108','p109','p120','p121','p122','p123','p124','p125','p126','p127','p128','strand'])
if w==3 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','M','UM','Mo','UMo','strand'])
if w==4 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','M','UM','Mo','UMo','strand'])
if w==5 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'M','UM','Mo','UMo','strand'])
if w==6 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'p33o','p34o','p35o','p36o','p37o','p38o','p39o','p40o','p41o','p42o','p43o','p44o','p45o','p46o',\
'p47o','p48o','p49o','p50o','p51o','p52o','p53o','p54o','p55o','p56o','p57o','p58o','p59o','p60o',\
'p61o','p62o','p63o','p64o','M','UM','Mo','UMo','strand'])
neverr = never = True
chrom_list = []
# all samples' bam files
for i in samfile.get_index_statistics():
chrom_list.append(i.contig)
if chrom in chrom_list:
# screen bamfile by column
for pileupcolumn in samfile.pileup(chrom):
coverage += 1
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
methtemp = meth.copy()
# imputation by sliding windows of w C by 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
# meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
# total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
# for i in range(0,meth.shape[1]-w+1,1):
# if i<w:
for i in range(0,w,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"%s/CG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
return filename, coverage, cov_context, 'CG'
#samfile.close()
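# Example invocation sketch (hypothetical file names; expects MeHdata/sample.bam and MeHdata/genome.fa):
#   CGgenome_scr('sample.bam', chrom='chr1', w=4, fa='genome', mC=4)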
def CHHgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata',minML=0.05):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if w==3 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','M','UM','strand'])
if w==4 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','M','UM','strand'])
if w==5 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'M','UM','strand'])
if w==6 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','M','UM','strand'])
if w==7 and not optional:
ResultPW = pd.DataFrame(columns=\
['chrom','pos','M','UM','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'p65','p66','p67','p68','p69','p70','p71','p72','p73','p74','p75','p76','p77','p78','p79','p80','p81','p82','p83','p84','p85','p86'\
,'p87','p88','p89','p90','p91','p92','p93','p94','p95','p96','p97','p98','p99','p100','p101','p102','p103','p104'\
,'p105','p106','p107','p108','p109','p120','p121','p122','p123','p124','p125','p126','p127','p128','strand'])
if w==3 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','M','UM','Mo','UMo','strand'])
if w==4 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','M','UM','Mo','UMo','strand'])
if w==5 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'M','UM','Mo','UMo','strand'])
if w==6 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'p33o','p34o','p35o','p36o','p37o','p38o','p39o','p40o','p41o','p42o','p43o','p44o','p45o','p46o',\
'p47o','p48o','p49o','p50o','p51o','p52o','p53o','p54o','p55o','p56o','p57o','p58o','p59o','p60o',\
'p61o','p62o','p63o','p64o','M','UM','Mo','UMo','strand'])
neverr = never = True
if samfile.is_valid_reference_name(chrom):
for pileupcolumn in samfile.pileup(chrom):
coverage += 1
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CHH_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CHH_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"%s/CHH_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
return filename, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata',minML=0.05):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if w==3 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','M','UM','strand'])
if w==4 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','M','UM','strand'])
if w==5 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'M','UM','strand'])
if w==6 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','M','UM','strand'])
if w==7 and not optional:
ResultPW = pd.DataFrame(columns=\
['chrom','pos','M','UM','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'p65','p66','p67','p68','p69','p70','p71','p72','p73','p74','p75','p76','p77','p78','p79','p80','p81','p82','p83','p84','p85','p86'\
,'p87','p88','p89','p90','p91','p92','p93','p94','p95','p96','p97','p98','p99','p100','p101','p102','p103','p104'\
,'p105','p106','p107','p108','p109','p120','p121','p122','p123','p124','p125','p126','p127','p128','strand'])
if w==3 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','M','UM','Mo','UMo','strand'])
if w==4 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','M','UM','Mo','UMo','strand'])
if w==5 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'M','UM','Mo','UMo','strand'])
if w==6 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'p33o','p34o','p35o','p36o','p37o','p38o','p39o','p40o','p41o','p42o','p43o','p44o','p45o','p46o',\
'p47o','p48o','p49o','p50o','p51o','p52o','p53o','p54o','p55o','p56o','p57o','p58o','p59o','p60o',\
'p61o','p62o','p63o','p64o','M','UM','Mo','UMo','strand'])
neverr = never = True
start=datetime.datetime.now()
if samfile.is_valid_reference_name(chrom):
for pileupcolumn in samfile.pileup(chrom):
coverage += 1
#chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# cover original matrix
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
							if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CHG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# cover original matrix
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s_%s.csv"%(filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"%s/CHG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
	print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
	return filename, coverage, cov_context, 'CHG'
def split_bam(samplenames,Folder):
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
		if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):  # rotate to a new output BAM once the current chunk exceeds ~337 MB
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("-mC", "--mindepth",type=int, default=4, help='Minimum depth per cytosine')
parser.add_argument('-f', "--foldername", default='MeHdata', type = str, help = 'Folder name of the location of input files' )
parser.add_argument('--opt', default=False, action='store_true', help='Output original count of patterns')
parser.add_argument('-mML', "--minML",type=float,default=0.05, help='Minimum methylation level for CHG/CHH results')
args = parser.parse_args()
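# Example invocation (hypothetical script name and folder layout; the folder must
# contain one .fa reference plus the .bam files to screen):
#   python MeHscr.py -w 4 -c 8 --CHG -mC 4 -mML 0.05 -f MeHdata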
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
#open_log('MeHscreening.log')
#start = time.time()
#Folder = 'MeHdata/'
Folder = args.foldername + '/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
fastafile = pysam.FastaFile('%s%s.fa' % (Folder,fa))
chromosomes=[]
for chrom in fastafile.references:
chromosomes.append(chrom)
fastafile.close()
topp = pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
#CG = []
#start=t.time()
if args.CG:
con='CG'
CG=Parallel(n_jobs=args.cores)(delayed(CGgenome_scr)(bam,chrom=c,w=args.windowsize,fa=fa,mC=args.mindepth,optional=args.opt,folder=args.foldername) for bam in bam_list for c in chromosomes)
for file in bam_list:
for c in chromosomes:
res_dir = Folder + con + '_'+ file + '.csv'
toapp_dir = Folder + con + '_'+ file + '_'+ c + '.csv'
if os.path.exists(res_dir) and os.path.exists(toapp_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
elif os.path.exists(toapp_dir):
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
#logm("All done. "+str(len(bam_list))+" bam files processed and merged for CG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
#topp.groupby(['context','sample']).agg({'coverage': 'sum', 'context_coverage': 'sum'})
#print(topp)
if args.CHG:
con='CHG'
CG=Parallel(n_jobs=args.cores)(delayed(CHGgenome_scr)(bam,chrom=c,w=args.windowsize,fa=fa,mC=args.mindepth,optional=args.opt,folder=args.foldername,minML=args.minML) for bam in bam_list for c in chromosomes)
logm("Merging within samples for CHG.")
# not into bins of 400bp
for file in bam_list:
for c in chromosomes:
res_dir = Folder + con + '_'+ file + '.csv'
toapp_dir = Folder + con + '_'+ file + '_'+ c + '.csv'
if os.path.exists(res_dir) and os.path.exists(toapp_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False, header = True)
os.remove(toapp_dir)
elif os.path.exists(toapp_dir):
Toappend =
|
pd.read_csv(toapp_dir)
|
pandas.read_csv
|
import pandas as pd
from common.util.constant import TIMESTAMP, VALUE
from common.util.series import Series
class ForecastFactor:
def __init__(self, series: Series):
self.name = series.series_id
self.metrics = series.metric_id
self.tags = series.dim
self.values = pd.DataFrame(series.value)
self.values = self.values[[TIMESTAMP, VALUE]]
self.values[TIMESTAMP] =
|
pd.to_datetime(self.values[TIMESTAMP])
|
pandas.to_datetime
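# A standalone sketch of the normalisation ForecastFactor performs above, using plain
# pandas column names ("timestamp"/"value") in place of the TIMESTAMP/VALUE constants
# from common.util.constant; the record layout below is an illustrative assumption.
import pandas as pd

def normalize_series_values(records, ts_col="timestamp", val_col="value"):
    frame = pd.DataFrame(records)
    frame = frame[[ts_col, val_col]]               # keep only the two relevant columns
    frame[ts_col] = pd.to_datetime(frame[ts_col])  # parse timestamps into datetime64
    return frame

# e.g. normalize_series_values([{"timestamp": "2021-01-01", "value": 1.0}])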
|
import os
import dash
import pickle
import base64
import numpy as np
import pandas as pd
from dash import dcc
from dash import html
import plotly.graph_objects as go
from dash.dependencies import Input, Output, State
CROP_IMG_PATH = r'C:\Users\HP\Downloads\Precision-Agriculture-main\Precision-Agriculture-main\crops'
DATA_PATH = r'C:\Users\HP\Downloads\Precision-Agriculture-main\Precision-Agriculture-main\Crop_recommendation.csv'
TRAINED_MODEL_PATH = r'C:\Users\HP\Downloads\Precision-Agriculture-main\Precision-Agriculture-main\KNN_model_crop_prediction.pkl'
crop_img_files = [os.path.join(CROP_IMG_PATH, f) for f in os.listdir(CROP_IMG_PATH)]
def crop_id_col(df = None):
mapping = {c : i for i, c in enumerate(list(df['label'].unique()), 1)}
df['crop_id'] = [mapping[c] for c in df['label']]
return df
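# A small usage sketch for crop_id_col (toy labels; the real data is the
# Crop_recommendation.csv loaded from DATA_PATH):
#   toy = pd.DataFrame({'label': ['rice', 'maize', 'rice']})
#   crop_id_col(toy)['crop_id'].tolist()   # -> [1, 2, 1]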
# def get_sample_data(df = None, col_name = None, No_of_samples = 20):
# mapping = {c : i for i, c in enumerate(list(df['label'].unique()), 1)}
# frames = []
# for k, v in mapping.items():
# samp = df.loc[(df[col_name] ==v)].iloc[:No_of_samples]
# frames.append(samp)
# return pd.concat(frames)
def data_grouping(df):
dummy =
|
pd.DataFrame()
|
pandas.DataFrame
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
from datetime import date
moeda = {
'BRL': 1,
'USD': 0,
'CAD': 0,
'JPY': 0,
'EUR': 0,
'GBP': 0,
'AUD': 0,
'RUB': 0,
'HKD': 0,
'CNY': 0,
'CHF': 0,
'SEK': 0
}
valor_total_operacoes = 0.0
valor_total_ganho = 0.0
valor_para_real = 0.0
nome_cliente = input("Digite o nome do cliente: ")
hoje = date.today()
data_da_operação = hoje.strftime("%d/%m/%Y")
req = requests.get('http://br.advfn.com/cambio/graficos/brl')
if req.status_code == 200:
content = req.content
soup = BeautifulSoup(content, 'html.parser')
table = soup.find(name='table')
table_str = str(table)
df =
|
pd.read_html(table_str)
|
pandas.read_html
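# Note: pd.read_html parses every <table> it finds and returns a *list* of
# DataFrames, so the quotes table itself is the first element. A minimal sketch,
# assuming at least one table was parsed:
#   tabela = df[0]
#   print(tabela.head())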
|
# -*- coding: utf-8 -*-
from abc import ABC
from flask import Flask, request
import json
from utils.destinations import destinations
import swapi
from preprocess.preprocess_starship_data import PreProcessing
from starships.recommender_system import StarShipRecommendation
import pandas as pd
import os
import logging
from flask_script import Manager
# POST: /api/starships/recommend
# =====================
# Example request:
# --------------------------------------------------
# Content-Type: application/json
#
# { "id": 6 }
#
# curl -H "Content-Type: application/json" -X POST http://0.0.0.0:5000/api/starships/recommend -d '{"id":9}'
# --------------------------------------------------
# Example response:
# --------------------------------------------------
# Content-Type: application/json
#
# { "alternatives": [ {starship 1}, {starship 2}, {starship 3} ...] }
# --------------------------------------------------
""" Write your API endpoint here """
# GET: /api/starships/
# =====================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = Flask(__name__)
@app.before_first_request
def __cosine_similartie__():
logger.info("Checking for cosine similarity csv")
if not os.path.exists("cosine_similarities.csv"):
logger.debug("CSV not found, starting process")
preprocessor = PreProcessing("starships")
cosine_sim = StarShipRecommendation()
logger.info("Initialising pre-processing ")
data = preprocessor.run_preprocessing().set_index("name", drop=True)
cosine_sim = cosine_sim.cosine_similarity_table(data.drop(labels=["starship_id"], axis=1),
data.drop(labels=["starship_id"], axis=1))
df = pd.DataFrame(cosine_sim, columns=data.index)
try:
df.index = data.index
df.to_csv("cosine_similarities.csv", index=True)
data[["starship_id"]].to_csv("id_to_name.csv", index=True)
return df
except Exception as e:
raise e
else:
return pd.read_csv("cosine_similarities.csv")
def recommend(starship_id):
recommending_engine = StarShipRecommendation()
df = pd.read_csv("cosine_similarities.csv")
id_to_name =
|
pd.read_csv("id_to_name.csv")
|
pandas.read_csv
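# A minimal sketch (not the project's actual implementation) of how the cached
# similarity matrix could be used to finish recommend(): look up the row for the
# requested starship and return the most similar names. Assumes the CSV was written
# with the starship name as its index, as done in __cosine_similartie__ above.
import pandas as pd

def top_similar(name, n=3):
    sims = pd.read_csv("cosine_similarities.csv", index_col=0)
    row = sims.loc[name].drop(labels=[name], errors="ignore")  # drop self-similarity
    return row.sort_values(ascending=False).head(n).index.tolist()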
|
from multiprocessing import Pool
from functools import partial
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
import logging
import numpy as np
logger = logging.getLogger(__name__)
class AccountBalances:
""" AccountBalances manages btc/usd balances used in run_backest
contains functions for subtract / add / set for each currency
Functions
---------
    WARNING: THESE FUNCTIONS WILL BE DEPRECATED:
BETTER WAY TO SET VARIABLES:
bal = AccountBalances()
bal.usd = 55.34
def sub_cur(self, val)
subtracts the val from the cur
def add_cur(self, val)
        adds the val to the cur
def set_cur(self, val)
        sets the cur to the val
Parameters
----------
btc : numerical, default 0
Balance of BTC
usd : numerical, default 0
Balance of USD
"""
# __init__ will initiate new class each time
def __init__(self, p_btc, p_usd):
self.usd = p_usd
self.btc = p_btc
class BacktestSettings:
def __init__(self):
self.principle_btc = 0
self.principle_usd = 0
        self.upper_window = 0
        self.lower_window = 0
        self.factor_high = 0
        self.factor_low = 0
        self.buy_pct_usd = 0
        self.sell_pct_btc = 0
        self.min_usd = 0
        self.min_btc = 0
        self.start_date = 0
        self.end_date = 0
def set_principle_btc(self, val):
self.principle_btc = val
def set_principle_usd(self, val):
self.principle_usd = val
def set_upper_window(self, val):
self.upper_window = val
def set_lower_window(self, val):
self.lower_window = val
def set_factor_high(self, val):
self.factor_high = val
def set_factor_low(self, val):
self.factor_low = val
def set_buy_pct_usd(self, val):
self.buy_pct_usd = val
def set_sell_pct_btc(self, val):
self.sell_pct_btc = val
def set_min_usd(self, val):
self.min_usd = val
def set_min_btc(self, val):
self.min_btc = val
def set_start_date(self, val):
self.start_date = val
def set_end_date(self, val):
self.end_date = val
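# run_backtest below relies on a create_fill helper that is not shown in this
# excerpt. This is only a sketch of what it presumably returns (a plain dict that
# pd.DataFrame can consume); the field names are assumptions matched to the call sites.
def create_fill(timestamp, side, price, value_btc, value_usd):
    return {
        'timestamp': timestamp,   # time of the simulated fill
        'side': side,             # 'buy' or 'sell'
        'price': price,           # close price used for the fill
        'value_btc': value_btc,   # BTC amount exchanged
        'value_usd': value_usd,   # USD amount exchanged
    }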
def run_backtest(df, desired_outputs, bt):
""" run_backtest loops over the rows of buys and sells in df.
It calculates buys and sells and keeps a running balance of inputs.
Outputs a simplified dictionary of the results
    or a DataFrame of all successful fills.
    Parameters
    ----------
df : DataFrame,
Only needs to be buy and sells with other data removed
to increase speed
desired_outputs: string, default "both"
Toggles simple dictionary of results or
df of fill data
bt : Class: BacktestSettings(),
        Contains all required variables for running the backtest
"""
bal = AccountBalances(bt.principle_btc, bt.principle_usd)
fills = []
for row in list(zip(df['timestamp'], df['close'], df['buy_signal'], df['sell_signal'])):
price = row[1]
if row[2] == 1 and ((bal.usd * bt.buy_pct_usd) > bt.min_usd):
value_usd = bal.usd * bt.buy_pct_usd
value_btc = value_usd / price
value_btc = value_btc - (value_btc * .001)
bal.btc += value_btc
bal.usd -= value_usd
fills.append(create_fill(row[0], 'buy', price, value_btc, value_usd))
if row[3] == 1 and (bal.btc * bt.sell_pct_btc) > bt.min_btc:
value_btc = bal.btc * bt.sell_pct_btc
value_usd = price * value_btc
value_usd = value_usd - (value_usd * .001)
bal.usd += value_usd
bal.btc -= value_btc
fills.append(create_fill(row[0], 'sell', price, value_btc, value_usd))
num_fills = len(fills)
result = {
"n_fills": num_fills,
"upper_window": bt.upper_window,
"lower_window": bt.lower_window,
"upper_factor": bt.factor_high,
"lower_factor": bt.factor_low,
"buy_pct_usd": bt.buy_pct_usd,
"sell_pct_btc": bt.sell_pct_btc,
"usd_bal": bal.usd,
"btc_bal": bal.btc
}
if desired_outputs == "both":
fills =
|
pd.DataFrame(fills)
|
pandas.DataFrame
|
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
|
assert_frame_equal(self.frame[k], i_rec[k])
|
pandas.util.testing.assert_frame_equal
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
def import_data_from_origin_file():
data = pd.read_excel('2020A.xlsx').values
stock_ids = np.unique(data[:, 2])
np.random.shuffle(stock_ids)
inputs = []
labels = []
long_term_inputs = []
long_term_labels = []
count = 0
for stock_id in stock_ids:
stock_time_line = (data[data[:, 2] == stock_id])[:, 17:]
abnormal_index = np.unique(np.concatenate((np.argwhere(stock_time_line < 0)[:, 0], np.argwhere(stock_time_line[:, 0: 4] > 100)[:, 0]), axis=0))
drop = False
for index in abnormal_index:
if index == 0 or index == stock_time_line.shape[0] - 1:
continue
if np.any(stock_time_line[index, 0: 4] > 100):
drop = True
break
if np.any(stock_time_line[index - 1] < 0) or np.any(stock_time_line[index + 1] < 0):
drop = True
break
stock_time_line[index] = (stock_time_line[index - 1] + stock_time_line[index + 1]) / 2
if drop:
continue
if np.any(stock_time_line[0] < 0):
stock_time_line = np.delete(stock_time_line, 0, axis=0)
if np.any(stock_time_line[stock_time_line.shape[0] - 1] < 0):
stock_time_line = np.delete(stock_time_line, stock_time_line.shape[0] - 1, axis=0)
if count == 0 and stock_time_line.shape[0] // 31 > 1:
long_term_inputs.extend(stock_time_line[0: 30, :])
for index in range(30, 61):
long_term_labels.extend([[stock_time_line[index, 0]]])
count += 1
else:
for index in range(stock_time_line.shape[0] // 31):
inputs.extend(stock_time_line[index * 31: index * 31 + 30, :])
labels.extend([[stock_time_line[index * 31 + 30, 0]]])
inputs = np.array(inputs)
labels = np.array(labels)
|
pd.DataFrame(inputs)
|
pandas.DataFrame
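# A short sketch of how the arrays above could be split for model training;
# train_test_split is already imported from sklearn at the top of this file, and
# the 0.2 test fraction is an arbitrary illustrative choice:
#   x_train, x_test, y_train, y_test = train_test_split(inputs, labels, test_size=0.2)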
|
import json
import os
import random
from random import sample
import numpy as np
import numpy.random
import re
from collections import Counter
import inspect
import pandas as pd
import matplotlib.pyplot as plt
import requests
from IPython.display import HTML
import seaborn as sns
import networkx as nx
from pylab import rcParams
try:
from wordcloud import WordCloud
except ImportError:
print("wordcloud er ikke installert, kan ikke lage ordskyer")
#************** For defining wordbag search
def dict2pd(dictionary):
res = pd.DataFrame.from_dict(dictionary).fillna(0)
s = (res.mean(axis=0))
s = s.rename('snitt')
res = res.append(s)
return res.sort_values(by='snitt', axis=1, ascending=False).transpose()
def def2dict(ddef):
res = dict()
defs = ddef.split(';')
for d in defs:
lex = d.split(':')
if len(lex) == 2:
#print('#'.join(lex))
hyper = lex[0].strip()
occurrences = [x.strip() for x in lex[1].split(',')]
res[hyper] = occurrences
for x in res:
for y in res[x]:
if y.capitalize() not in res[x]:
res[x].append(y.capitalize())
return res
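# Worked example for def2dict (made-up words): the definition string
#   "frukt: eple, pære; dyr: hund"
# is parsed into
#   {'frukt': ['eple', 'pære', 'Eple', 'Pære'], 'dyr': ['hund', 'Hund']}
# i.e. each hypernym maps to its listed words plus their capitalized variants.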
def wordbag_eval(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags", json = param)
return dict2pd(r.json())
def wordbag_eval_para(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags_para", json = param)
return r.json()
def get_paragraphs(urn, paras):
"""Return paragraphs for urn"""
param = dict()
param['paragraphs'] = paras
param['urn'] = urn
r = requests.get("https://api.nb.no/ngram/paragraphs", json=param)
return dict2pd(r.json())
### ******************* wordbag search end
def ner(text = None, dist=False):
"""Analyze text for named entities - set dist = True will return the four values that go into decision"""
r = []
if text != None:
r = requests.post("https://api.nb.no/ngram/ner", json={'text':text,'dist':dist})
return r.json()
#**** names ****
def check_navn(navn, limit=2, remove='Ja Nei Nå Dem De Deres Unnskyld Ikke Ah Hmm <NAME> Jaja Jaha'.split()):
"""Removes all items in navn with frequency below limit and words in all case as well as all words in list 'remove'"""
r = {x:navn[x] for x in navn if navn[x] > limit and x.upper() != x and not x in remove}
return r
def sentences(urns, num=300):
if isinstance(urns[0], list):
urns = [str(x[0]) for x in urns]
params = {'urns':urns,
'num':num}
res = requests.get("https://api.nb.no/ngram/sentences", params=params)
return res.json()
def names(urn, ratio = 0.3, cutoff = 2):
""" Return namens in book with urn. Returns uni- , bi-, tri- and quadgrams """
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/names', json={'urn':urn, 'ratio':ratio, 'cutoff':cutoff})
x = r.json()
result = (
Counter(x[0][0]),
Counter({tuple(x[1][i][0]):x[1][i][1] for i in range(len(x[1]))}),
Counter({tuple(x[2][i][0]):x[2][i][1] for i in range(len(x[2]))}),
Counter({tuple(x[3][i][0]):x[3][i][1] for i in range(len(x[3]))})
)
return result
def name_graph(name_struct):
m = []
for n in name_struct[0]:
m.append(frozenset([n]))
for n in name_struct[1:]:
m += [frozenset(x) for x in n]
G = []
for x in m:
for y in m:
if x < y:
G.append((' '.join(x), ' '.join(y)))
N = []
for x in m:
N.append(' '.join(x))
Gg = nx.Graph()
Gg.add_nodes_from(N)
Gg.add_edges_from(G)
return Gg
def aggregate_urns(urnlist):
"""Sum up word frequencies across urns"""
if isinstance(urnlist[0], list):
urnlist = [u[0] for u in urnlist]
r = requests.post("https://api.nb.no/ngram/book_aggregates", json={'urns':urnlist})
return r.json()
# Norwegian word bank
def word_variant(word, form):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/variant_form", params={'word':word, 'form':form})
return r.json()
def word_paradigm(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/paradigm", params = {'word': word})
return r.json()
def word_form(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/word_form", params = {'word': word})
return r.json()
def word_lemma(word):
""" Find lemma form for a given word form """
r = requests.get("https://api.nb.no/ngram/word_lemma", params = {'word': word})
return r.json()
def word_freq(urn, words):
""" Find frequency of words within urn """
params = {'urn':urn, 'words':words}
r = requests.post("https://api.nb.no/ngram/freq", json=params)
return dict(r.json())
def tot_freq(words):
""" Find total frequency of words """
params = {'words':words}
r = requests.post("https://api.nb.no/ngram/word_frequencies", json=params)
return dict(r.json())
def book_count(urns):
params = {'urns':urns}
r = requests.post("https://api.nb.no/ngram/book_count", json=params)
return dict(r.json())
def sttr(urn, chunk=5000):
r = requests.get("https://api.nb.no/ngram/sttr", json = {'urn':urn, 'chunk':chunk})
return r.json()
def totals(top=200):
r = requests.get("https://api.nb.no/ngram/totals", json={'top':top})
return dict(r.json())
def navn(urn):
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/tingnavn', json={'urn':urn})
return dict(r.json())
def digibokurn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("(?<=digibok_)[0-9]{13}", T)
def urn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("[0-9]{13}", T)
def metadata(urn=None):
urns = pure_urn(urn)
#print(urns)
r = requests.post("https://api.nb.no/ngram/meta", json={'urn':urns})
return r.json()
def pure_urn(data):
"""Convert URN-lists with extra data into list of serial numbers.
Args:
data: May be a list of URNs, a list of lists with URNs as their
initial element, or a string of raw texts containing URNs
Any pandas dataframe or series. Urns must be in the first column of dataframe.
Returns:
        List[str]: A list of URNs. Empty list if the input is in the wrong
format or contains no URNs
"""
korpus_def = []
if isinstance(data, list):
if not data: # Empty list
korpus_def = []
if isinstance(data[0], list): # List of lists
try:
korpus_def = [str(x[0]) for x in data]
except IndexError:
korpus_def = []
else: # Assume data is already a list of URNs
korpus_def = [str(int(x)) for x in data]
elif isinstance(data, str):
korpus_def = [str(x) for x in urn_from_text(data)]
elif isinstance(data, (int, np.integer)):
korpus_def = [str(data)]
elif isinstance(data, pd.DataFrame):
col = data.columns[0]
urns = pd.to_numeric(data[col])
korpus_def = [str(int(x)) for x in urns.dropna()]
elif isinstance(data, pd.Series):
korpus_def = [str(int(x)) for x in data.dropna()]
return korpus_def
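# Worked examples for pure_urn (the 13-digit serial numbers below are made up):
#   pure_urn([2011051208103, 2008052804065])           -> ['2011051208103', '2008052804065']
#   pure_urn([[2011051208103, 'Ibsen', 'Brand']])       -> ['2011051208103']
#   pure_urn("se digibok_2011051208103 for detaljer")   -> ['2011051208103']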
#### N-Grams from fulltext updated
def unigram(word, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/unigrams", params={
'word':word,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def bigram(first,second, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/bigrams", params={
'first':first,
'second':second,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def book_counts(period=(1800, 2050)):
r = requests.get("https://api.nb.no/ngram/book_counts", params={
'period0':period[0],
'period1':period[1],
})
return frame(dict(r.json()))
####
def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):
"""Compute difference of difference (first/second)/(rf/rs)"""
try:
a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)
a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)
a = a_first.join(a_second)
b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)
b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)
if rf == rs:
b_second.columns = [rs + '2']
b = b_first.join(b_second)
s_a = a.mean()
s_b = b.mean()
f1 = s_a[a.columns[0]]/s_a[a.columns[1]]
f2 = s_b[b.columns[0]]/s_b[b.columns[1]]
res = f1/f2
except:
        res = 'Missing some data - only have data for: ' + ', '.join([x for x in a.columns.append(b.columns)])
return res
def df_combine(array_df):
"""Combine one columns dataframes"""
import pandas as pd
cols = []
for i in range(len(a)):
#print(i)
if array_df[i].columns[0] in cols:
array_df[i].columns = [array_df[i].columns[0] + '_' + str(i)]
cols.append(array_df[i].columns[0])
return pd.concat(a, axis=1, sort=True)
def col_agg(df, col='sum'):
c = df.sum(axis=0)
c = pd.DataFrame(c)
c.columns = [col]
return c
def row_agg(df, col='sum'):
c = df.sum(axis=1)
c = pd.DataFrame(c)
c.columns = [col]
return c
def get_freq(urn, top=50, cutoff=3):
"""Get frequency list for urn"""
if isinstance(urn, list):
urn = urn[0]
r = requests.get("https://api.nb.no/ngram/urnfreq", json={'urn':urn, 'top':top, 'cutoff':cutoff})
return Counter(dict(r.json()))
####=============== GET URNS ==================##########
def book_corpus(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
return frame(book_urn(words, author, title, subtitle, ddk, subject, period, gender, lang, trans, limit),
"urn author title year".split())
def book_urn(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
"""Get URNs for books with metadata"""
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_urn(query)
def unique_urns(korpus, newest=True):
author_title = {(c[1],c[2]) for c in korpus}
corpus = {(c[0], c[1]):[d for d in korpus if c[0] == d[1] and c[1]==d[2]] for c in author_title }
for c in corpus:
corpus[c].sort(key=lambda c: c[3])
if newest == True:
res = [corpus[c][-1] for c in corpus]
else:
res = [corpus[c][0] for c in corpus]
return res
def refine_book_urn(urns = None, words = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Refine URNs for books with metadata"""
# if empty urns nothing to refine
if urns is None or urns == []:
return []
# check if urns is a metadata list, and pick out first elements if that is the case
if isinstance(urns[0], list):
urns = [x[0] for x in urns]
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'urns'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
#print(query)
return refine_urn(urns, query)
def best_book_urn(word = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Get URNs for books with metadata"""
if word is None:
return []
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'word'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_best_urn(word, query)
def get_urn(metadata=None):
"""Get urns from metadata"""
if metadata is None:
metadata = {}
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 100
if not 'year' in metadata:
metadata['year'] = 1900
r = requests.get('https://api.nb.no/ngram/urn', json=metadata)
return r.json()
def refine_urn(urns, metadata=None):
"""Refine a list urns using extra information"""
if metadata is None:
metadata = {}
metadata['urns'] = urns
if not ('words' in metadata):
metadata['words'] = []
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 520
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.post('https://api.nb.no/ngram/refineurn', json=metadata)
return r.json()
def get_best_urn(word, metadata=None):
"""Get the best urns from metadata containing a specific word"""
metadata['word'] = word
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 600
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.get('https://api.nb.no/ngram/best_urn', json=metadata)
return r.json()
def get_papers(top=5, cutoff=5, navn='%', yearfrom=1800, yearto=2020, samplesize=100):
"""Get newspapers"""
div = lambda x, y: (int(x/y), x % y)
chunks = 20
# split samplesize into chunks, go through the chunks and then the remainder
(first, second) = div(samplesize, chunks)
r = []
# collect chunkwise
for i in range(first):
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':chunks}
).json()
# collect the remainder
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':second}
).json()
return [dict(x) for x in r]
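# Example of the chunking above: with samplesize=45 and chunks=20, div gives
# first=2 and second=5, so two requests of 20 newspapers each are made, followed
# by one request for the remaining 5.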
def urn_coll(word, urns=[], after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
r = requests.post("https://api.nb.no/ngram/urncoll", json={'word':word, 'urns':urns,
'after':after, 'before':before, 'limit':limit})
res = pd.DataFrame.from_dict(r.json(), orient='index')
if not res.empty:
res = res.sort_values(by=res.columns[0], ascending = False)
return res
def urn_coll_words(words, urns=None, after=5, before=5, limit=1000):
"""Find collocations for a group of words within a set of books given by a list of URNs. Only books at the moment"""
coll = pd.DataFrame()
if urns != None:
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
colls = Counter()
if isinstance(words, str):
words = words.split()
res = Counter()
for word in words:
try:
res += Counter(
requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word':word,
'urns':urns,
'after':after,
'before':before,
'limit':limit}
).json()
)
except:
True
coll = pd.DataFrame.from_dict(res, orient='index')
if not coll.empty:
coll = coll.sort_values(by=coll.columns[0], ascending = False)
return coll
def get_aggregated_corpus(urns, top=0, cutoff=0):
res = Counter()
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
for u in urns:
#print(u)
res += get_freq(u, top = top, cutoff = cutoff)
return pd.DataFrame.from_dict(res, orient='index').sort_values(by=0, ascending = False)
def compare_word_bags(bag_of_words, another_bag_of_words, first_freq = 0, another_freq = 1, top=100, first_col = 0, another_col= 0):
"""Compare two columns taken from two or one frame. Parameters x_freq are frequency limits used to cut down candidate words
from the bag of words. Compare along the columns where first_col and another_col are column numbers. Typical situation is that
bag_of_words is a one column frame and another_bag_of_words is another one column frame. When the columns are all from one frame,
just change column numbers to match the columns"""
diff = bag_of_words[bag_of_words > first_freq][bag_of_words.columns[first_col]]/another_bag_of_words[another_bag_of_words > another_freq][another_bag_of_words.columns[another_col]]
return frame(diff, 'diff').sort_values(by='diff', ascending=False)[:top]
def collocation(
word,
yearfrom=2010,
yearto=2018,
before=3,
after=3,
limit=1000,
corpus='avis',
lang='nob',
title='%',
ddk='%',
subtitle='%'):
"""Defined collects frequencies for a given word"""
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto,
'title':title,
'ddk':ddk,
'subtitle':subtitle}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def collocation_data(words, yearfrom = 2000, yearto = 2005, limit = 1000, before = 5, after = 5, title = '%', corpus='bok'):
"""Collocation for a set of words sum up all the collocations words is a list of words or a blank separated string of words"""
import sys
a = dict()
if isinstance(words, str):
words = words.split()
for word in words:
print(word)
try:
a[word] = collocation(
word,
yearfrom = yearfrom, yearto = yearto, limit = limit,
corpus = corpus, before = before,
after = after, title = title
)
a[word].columns = [word]
except:
            print(word, ' error situation', sys.exc_info())
result = pd.DataFrame()
for w in a:
result = result.join(a[w], how='outer')
return pd.DataFrame(result.sum(axis=1)).sort_values(by=0, ascending=False)
class CollocationCorpus:
from random import sample
def __init__(self, corpus = None, name='', maximum_texts = 500):
urns = pure_urn(corpus)
if len(urns) > maximum_texts:
            selection = sample(urns, maximum_texts)
else:
selection = urns
self.corpus_def = selection
self.corpus = get_aggregated_corpus(self.corpus_def, top=0, cutoff=0)
def summary(self, head=10):
info = {
'corpus_definition':self.corpus[:head],
'number_of_words':len(self.corpus)
}
return info
def collocation_old(word, yearfrom=2010, yearto=2018, before=3, after=3, limit=1000, corpus='avis'):
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def heatmap(df, color='green'):
return df.fillna(0).style.background_gradient(cmap=sns.light_palette(color, as_cmap=True))
def get_corpus_text(urns, top = 0, cutoff=0):
k = dict()
if isinstance(urns, list):
# a list of urns, or a korpus with urns as first element
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
# assume it is a single urn, text or number
urns = [urns]
for u in urns:
#print(u)
k[u] = get_freq(u, top = top, cutoff = cutoff)
df = pd.DataFrame(k)
res = df.sort_values(by=df.columns[0], ascending=False)
return res
def normalize_corpus_dataframe(df):
colsums = df.sum()
for x in colsums.index:
#print(x)
df[x] = df[x].fillna(0)/colsums[x]
return True
def show_korpus(korpus, start=0, size=4, vstart=0, vsize=20, sortby = ''):
"""Show corpus as a panda dataframe
start = 0 indicates which dokument to show first, dataframe is sorted according to this
size = 4 how many documents (or columns) are shown
top = 20 how many words (or rows) are shown"""
if sortby != '':
val = sortby
else:
val = korpus.columns[start]
return korpus[korpus.columns[start:start+size]].sort_values(by=val, ascending=False)[vstart:vstart + vsize]
def aggregate(korpus):
"""Make an aggregated sum of all documents across the corpus, here we use average"""
return pd.DataFrame(korpus.fillna(0).mean(axis=1))
def convert_list_of_freqs_to_dataframe(referanse):
"""The function get_papers() returns a list of frequencies - convert it"""
res = []
for x in referanse:
res.append( dict(x))
result = pd.DataFrame(res).transpose()
normalize_corpus_dataframe(result)
return result
def get_corpus(top=0, cutoff=0, navn='%', corpus='avis', yearfrom=1800, yearto=2020, samplesize=10):
if corpus == 'avis':
result = get_papers(top=top, cutoff=cutoff, navn=navn, yearfrom=yearfrom, yearto=yearto, samplesize=samplesize)
res = convert_list_of_freqs_to_dataframe(result)
else:
urns = get_urn({'author':navn, 'year':yearfrom, 'neste':yearto-yearfrom, 'limit':samplesize})
res = get_corpus_text([x[0] for x in urns], top=top, cutoff=cutoff)
return res
class Cluster:
def __init__(self, word = '', filename = '', period = (1950,1960) , before = 5, after = 5, corpus='avis', reference = 200,
word_samples=1000):
if word != '':
self.collocates = collocation(word, yearfrom=period[0], yearto = period[1], before=before, after=after,
corpus=corpus, limit=word_samples)
self.collocates.columns = [word]
if type(reference) is pd.core.frame.DataFrame:
reference = reference
elif type(reference) is int:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=reference)
else:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=int(reference))
self.reference = aggregate(reference)
self.reference.columns = ['reference_corpus']
self.word = word
self.period = period
self.corpus = corpus
else:
if filename != '':
self.load(filename)
def cluster_set(self, exponent=1.1, top = 200, aslist=True):
combo_corp = self.reference.join(self.collocates, how='outer')
normalize_corpus_dataframe(combo_corp)
korpus = compute_assoc(combo_corp, self.word, exponent)
korpus.columns = [self.word]
if top <= 0:
res = korpus.sort_values(by=self.word, ascending=False)
else:
res = korpus.sort_values(by=self.word, ascending=False).iloc[:top]
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def add_reference(self, number=20):
ref = get_corpus(yearfrom=self.period[0], yearto=self.period[1], samplesize=number)
ref = aggregate(ref)
ref.columns = ['add_ref']
normalize_corpus_dataframe(ref)
self.reference = aggregate(self.reference.join(ref, how='outer'))
return True
def save(self, filename=''):
if filename == '':
filename = "{w}_{p}-{q}.json".format(w=self.word,p=self.period[0], q = self.period[1])
model = {
'word':self.word,
'period':self.period,
'reference':self.reference.to_dict(),
'collocates':self.collocates.to_dict(),
'corpus':self.corpus
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
            print('saving to:', filename)
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.word = model['word']
self.period = model['period']
self.corpus = model['corpus']
self.reference = pd.DataFrame(model['reference'])
self.collocates = pd.DataFrame(model['collocates'])
except:
                print('something went wrong')
return True
def search_words(self, words, exponent=1.1):
if type(words) is str:
words = [w.strip() for w in words.split()]
df = self.cluster_set(exponent=exponent, top=0, aslist=False)
sub= [w for w in words if w in df.index]
res = df.transpose()[sub].transpose().sort_values(by=df.columns[0], ascending=False)
return res
def wildcardsearch(params=None):
if params is None:
params = {'word': '', 'freq_lim': 50, 'limit': 50, 'factor': 2}
res = requests.get('https://api.nb.no/ngram/wildcards', params=params)
if res.status_code == 200:
result = res.json()
else:
result = {'status':'feil'}
resultat = pd.DataFrame.from_dict(result, orient='index')
if not(resultat.empty):
resultat.columns = [params['word']]
return resultat
def sorted_wildcardsearch(params):
res = wildcardsearch(params)
if not res.empty:
res = res.sort_values(by=params['word'], ascending=False)
return res
def make_newspaper_network(key, wordbag, titel='%', yearfrom='1980', yearto='1990', limit=500):
if type(wordbag) is str:
wordbag = wordbag.split()
r = requests.post("https://api.nb.no/ngram/avisgraph", json={
'key':key,
'words':wordbag,
'yearto':yearto,
'yearfrom':yearfrom,
'limit':limit})
G = nx.Graph()
if r.status_code == 200:
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > 0 and x != y])
else:
print(r.text)
return G
def make_network(urn, wordbag, cutoff=0):
if type(urn) is list:
urn = urn[0]
if type(wordbag) is str:
wordbag = wordbag.split()
G = make_network_graph(urn, wordbag, cutoff)
return G
def make_network_graph(urn, wordbag, cutoff=0):
r = requests.post("https://api.nb.no/ngram/graph", json={'urn':urn, 'words':wordbag})
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def make_network_name_graph(urn, tokens, tokenmap=None, cutoff=2):
if isinstance(urn, list):
urn = urn[0]
# tokens should be a list of list of tokens. If it is list of dicts pull out the keys (= tokens)
if isinstance(tokens[0], dict):
tokens = [list(x.keys()) for x in tokens]
r = requests.post("https://api.nb.no/ngram/word_graph", json={'urn':urn, 'tokens':tokens, 'tokenmap':tokenmap})
#print(r.text)
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def token_convert_back(tokens, sep='_'):
""" convert a list of tokens to string representation"""
res = [tokens[0]]
for y in tokens:
res.append([tuple(x.split(sep)) for x in y])
l = len(res)
for x in range(1, 4-l):
res.append([])
return res
def token_convert(tokens, sep='_'):
""" convert back to tuples """
tokens = [list(x.keys()) for x in tokens]
tokens = [[(x,) for x in tokens[0]], tokens[1], tokens[2], tokens[3]]
conversion = []
for x in tokens:
conversion.append([sep.join(t) for t in x])
return conversion
def token_map_to_tuples(tokens_as_strings, sep='_', arrow='==>'):
tuples = []
for x in tokens_as_strings:
token = x.split(arrow)[0].strip()
mapsto = x.split(arrow)[1].strip()
tuples.append((tuple(token.split(sep)), tuple(mapsto.split(sep))))
#tuples = [(tuple(x.split(arrow).strip()[0].split(sep)), tuple(x.split(arrow)[1].strip().split(sep))) for x in tokens_as_strings]
return tuples
def token_map(tokens, strings=False, sep='_', arrow= '==>'):
""" tokens as from nb.names()"""
if isinstance(tokens[0], dict):
# get the keys(), otherwise it is already just a list of tokens up to length 4
tokens = [list(x.keys()) for x in tokens]
# convert tokens to tuples and put them all in one list
tokens = [(x,) for x in tokens[0]] + tokens[1] + tokens[2] + tokens[3]
tm = []
#print(tokens)
for token in tokens:
if isinstance(token, str):
trep = (token,)
elif isinstance(token, list):
trep = tuple(token)
token = tuple(token)
else:
trep = token
n = len(trep)
#print(trep)
if trep[-1].endswith('s'):
cp = list(trep[:n-1])
cp.append(trep[-1][:-1])
cp = tuple(cp)
#print('copy', cp, trep)
if cp in tokens:
#print(trep, cp)
trep = cp
larger = [ts for ts in tokens if set(ts) >= set(trep)]
#print(trep, ' => ', larger)
larger.sort(key=lambda x: len(x), reverse=True)
tm.append((token,larger[0]))
res = tm
if strings == True:
res = [sep.join(x[0]) + ' ' + arrow + ' ' + sep.join(x[1]) for x in tm]
return res
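# Worked example for token_map (made-up name tokens): with
#   tokens = [['Ibsen'], [('Henrik', 'Ibsen')], [], []]
# every token is mapped to the longest token set containing it, so
#   token_map(tokens, strings=True)
# returns ['Ibsen ==> Henrik_Ibsen', 'Henrik_Ibsen ==> Henrik_Ibsen'].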
def draw_graph_centrality(G, h=15, v=10, fontsize=20, k=0.2, arrows=False, font_color='black', threshold=0.01):
node_dict = nx.degree_centrality(G)
subnodes = dict({x:node_dict[x] for x in node_dict if node_dict[x] >= threshold})
x, y = rcParams['figure.figsize']
rcParams['figure.figsize'] = h, v
pos =nx.spring_layout(G, k=k)
ax = plt.subplot()
ax.set_xticks([])
ax.set_yticks([])
G = G.subgraph(subnodes)
nx.draw_networkx_labels(G, pos, font_size=fontsize, font_color=font_color)
nx.draw_networkx_nodes(G, pos, alpha=0.5, nodelist=subnodes.keys(), node_size=[v * 1000 for v in subnodes.values()])
nx.draw_networkx_edges(G, pos, alpha=0.7, arrows=arrows, edge_color='lightblue', width=1)
rcParams['figure.figsize'] = x, y
return True
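# Illustrative sketch (not part of the original module): draws a tiny weighted
# graph, keeping only nodes whose degree centrality reaches the threshold.
# Assumes networkx (nx) and matplotlib are imported as in the functions above.
def _demo_draw_graph_centrality():
    G = nx.Graph()
    G.add_weighted_edges_from([('konge', 'dronning', 3),
                               ('konge', 'slott', 2),
                               ('dronning', 'slott', 1)])
    draw_graph_centrality(G, h=8, v=6, threshold=0.0)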
def combine(clusters):
"""Make new collocation analyses from data in clusters"""
colls = []
collocates = clusters[0].collocates
for c in clusters[1:]:
collocates = collocates.join(c.collocates, rsuffix='-' + str(c.period[0]))
return collocates
def cluster_join(cluster):
clusters = [cluster[i] for i in cluster]
clst = clusters[0].cluster_set(aslist=False)
for c in clusters[1:]:
clst = clst.join(c.cluster_set(aslist=False), rsuffix = '_'+str(c.period[0]))
return clst
def serie_cluster(word, startår, sluttår, inkrement, before=5, after=5, reference=150, word_samples=500):
tidscluster = dict()
for i in range(startår, sluttår, inkrement):
tidscluster[i] = Cluster(
word,
corpus='avis',
period=(i, i + inkrement - 1),
before=before,
after=after,
reference=reference,
word_samples=word_samples)
print(i, i+inkrement - 1)
return tidscluster
def save_serie_cluster(tidscluster):
for i in tidscluster:
tidscluster[i].save()
return 'OK'
def les_serie_cluster(word, startår, sluttår, inkrement):
tcluster = dict()
for i in range(startår, sluttår, inkrement):
print(i, i+inkrement - 1)
tcluster[i] = Cluster(filename='{w}_{f}-{t}.json'.format(w=word, f=i,t=i+inkrement - 1))
return tcluster
def make_cloud(json_text, top=100, background='white', stretch=lambda x: 2**(10*x), width=500, height=500, font_path=None):
pairs0 = Counter(json_text).most_common(top)
pairs = {x[0]:stretch(x[1]) for x in pairs0}
wc = WordCloud(
font_path=font_path,
background_color=background,
width=width,
#color_func=my_colorfunc,
ranks_only=True,
height=height).generate_from_frequencies(pairs)
return wc
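# Illustrative sketch (not part of the original module): builds a word cloud
# from a small frequency dict; the `stretch` function exaggerates differences
# between frequent and rare words before they are passed to WordCloud.
def _demo_make_cloud():
    freqs = {'hav': 0.4, 'fjell': 0.3, 'skog': 0.2, 'fjord': 0.1}
    return make_cloud(freqs, top=4, stretch=lambda x: 2 ** (10 * x))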
def draw_cloud(sky, width=20, height=20, fil=''):
plt.figure(figsize=(width,height))
plt.imshow(sky, interpolation='bilinear')
figplot = plt.gcf()
if fil != '':
figplot.savefig(fil, format='png')
return
def cloud(pd, column='', top=200, width=1000, height=1000, background='black', file='', stretch=10, font_path=None):
if column == '':
column = pd.columns[0]
data = json.loads(pd[column].to_json())
a_cloud = make_cloud(data, top=top,
background=background, font_path=font_path,
stretch=lambda x: 2**(stretch*x), width=width, height=height)
draw_cloud(a_cloud, fil=file)
return
def make_a_collocation(word, period=(1990, 2000), before=5, after=5, corpus='avis', samplesize=100, limit=2000):
collocates = collocation(word, yearfrom=period[0], yearto=period[1], before=before, after=after,
corpus=corpus, limit=limit)
collocates.columns = [word]
reference = get_corpus(yearfrom=period[0], yearto=period[1], samplesize=samplesize)
ref_agg = aggregate(reference)
ref_agg.columns = ['reference_corpus']
return ref_agg
def compute_assoc(coll_frame, column, exponent=1.1, refcolumn = 'reference_corpus'):
return pd.DataFrame(coll_frame[column]**exponent/coll_frame.mean(axis=1))
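# Illustrative sketch (not part of the original module): compute_assoc scores
# words that are relatively more frequent in `column` than in the row mean
# across all columns. The toy frame below is made up.
def _demo_compute_assoc():
    frame = pd.DataFrame({'demokrati': [10.0, 2.0, 1.0],
                          'reference_corpus': [2.0, 2.0, 1.0]},
                         index=['frihet', 'og', 'i'])
    # 'frihet' gets the highest score since it is much more frequent
    # in the 'demokrati' column than on average.
    return compute_assoc(frame, 'demokrati', exponent=1.1)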
class Corpus:
def __init__(self, filename = '', target_urns = None, reference_urns = None, period = (1950,1960), author='%',
title='%', ddk='%', gender='%', subject='%', reference = 100, max_books=100):
params = {
'year':period[0],
'next': period[1]-period[0],
'subject':subject,
'ddk':ddk,
'author':author,
#'gender':gender, does not seem to work for get_urn - check the API
'title':title,
'limit':max_books,
'reference':reference
}
self.params = params
self.coll = dict()
self.coll_graph = dict()
if filename == '':
if target_urns != None:
målkorpus_def = target_urns
else:
målkorpus_def = get_urn(params)
#print("Antall bøker i målkorpus ", len(målkorpus_def))
if isinstance(målkorpus_def[0], list):
målkorpus_urn = [str(x[0]) for x in målkorpus_def]
#print(målkorpus_urn)
else:
målkorpus_urn = målkorpus_def
if len(målkorpus_urn) > max_books and max_books > 0:
target_urn = list(numpy.random.choice(målkorpus_urn, max_books))
else:
target_urn = målkorpus_urn
if reference_urns != None:
referansekorpus_def = reference_urns
else:
# select from period, usually used only if the target is defined by metadata
referansekorpus_def = get_urn({'year':period[0], 'next':period[1]-period[0], 'limit':reference})
#print("<NAME> i referanse: ", len(referansekorpus_def))
# referansen skal være distinkt fra målkorpuset
referanse_urn = [str(x[0]) for x in referansekorpus_def]
self.reference_urn = referanse_urn
self.target_urn = target_urn
# make sure there is no overlap between target and reference
#
referanse_urn = list(set(referanse_urn) - set(target_urn))
målkorpus_txt = get_corpus_text(target_urn)
normalize_corpus_dataframe(målkorpus_txt)
if referanse_urn != []:
referanse_txt = get_corpus_text(referanse_urn)
normalize_corpus_dataframe(referanse_txt)
combo = målkorpus_txt.join(referanse_txt)
else:
referanse_txt = målkorpus_txt
combo = målkorpus_txt
self.combo = combo
self.reference = referanse_txt
self.target = målkorpus_txt
self.reference = aggregate(self.reference)
self.reference.columns = ['reference_corpus']
## document frequencies
mål_docf = pd.DataFrame(pd.DataFrame(målkorpus_txt/målkorpus_txt).sum(axis=1))
combo_docf = pd.DataFrame(pd.DataFrame(combo/combo).sum(axis=1))
ref_docf = pd.DataFrame(pd.DataFrame(referanse_txt/referanse_txt).sum(axis=1))
### Normalize the document frequencies
normalize_corpus_dataframe(mål_docf)
normalize_corpus_dataframe(combo_docf)
normalize_corpus_dataframe(ref_docf)
self.målkorpus_tot = aggregate(målkorpus_txt)
self.combo_tot = aggregate(combo)
self.mål_docf = mål_docf
self.combo_docf = combo_docf
self.lowest = self.combo_tot.sort_values(by=0)[0][0]
else:
self.load(filename)
return
def difference(self, freq_exp=1.1, doc_exp=1.1, top = 200, aslist=True):
res = pd.DataFrame(
(self.målkorpus_tot**freq_exp/self.combo_tot)*(self.mål_docf**doc_exp/self.combo_docf)
)
res.columns = ['diff']
if top > 0:
res = res.sort_values(by=res.columns[0], ascending=False).iloc[:top]
else:
res = res.sort_values(by=res.columns[0], ascending=False)
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def save(self, filename):
model = {
'params':self.params,
'target': self.målkorpus_tot.to_json(),
'combo': self.combo_tot.to_json(),
'target_df': self.mål_docf.to_json(),
'combo_df': self.combo_docf.to_json()
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.params = model['params']
#print(self.params)
self.målkorpus_tot = pd.read_json(model['target'])
#print(self.målkorpus_tot[:10])
self.combo_tot = pd.read_json(model['combo'])
self.mål_docf = pd.read_json(model['target_df'])
self.combo_docf = pd.read_json(model['combo_df'])
except:
print('something went wrong')
return True
def collocations(self, word, after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
r = requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word': word,
'urns': self.target_urn,
'after': after,
'before': before,
'limit': limit
}
)
temp = pd.DataFrame.from_dict(r.json(), orient='index')
normalize_corpus_dataframe(temp)
self.coll[word] = temp.sort_values(by = temp.columns[0], ascending = False)
return True
def conc(self, word, before=8, after=8, size=10, combo=0):
if combo == 0:
urns = self.target_urn + self.reference_urn
elif combo == 1:
urns = self.target_urn
else:
urns = self.reference_urn
if len(urns) > 300:
urns = list(numpy.random.choice(urns, 300, replace=False))
return get_urnkonk(word, {'urns':urns, 'before':before, 'after':after, 'limit':size})
def sort_collocations(self, word, comparison = None, exp = 1.0, above = None):
if comparison == None:
comparison = self.combo_tot[0]
try:
res =
|
pd.DataFrame(self.coll[word][0]**exp/comparison)
|
pandas.DataFrame
|
"""Creates the Windfarm class/model."""
import os # type: ignore
import numpy as np
import pandas as pd # type: ignore
import logging # type: ignore
import networkx as nx # type: ignore
from math import fsum
from geopy import distance # type: ignore
from itertools import chain, combinations
from wombat.core import RepairManager, WombatEnvironment
from wombat.core.library import load_yaml
from wombat.windfarm.system import Cable, System
class Windfarm:
"""The primary class for operating on objects within a windfarm. The substations,
cables, and turbines are created as a network object to be more appropriately accessed
and controlled.
"""
def __init__(
self,
env: WombatEnvironment,
windfarm_layout: str,
repair_manager: RepairManager,
) -> None:
self.env = env
self.repair_manager = repair_manager
# self._logging_setup()
self._create_graph_layout(windfarm_layout)
self._create_turbines_and_substations()
self._create_cables()
self.capacity = sum(self.node_system(turb).capacity for turb in self.turbine_id)
self._create_substation_turbine_map()
self.calculate_distance_matrix()
self.repair_manager._register_windfarm(self)
self.env.process(self._log_operations())
def _logging_setup(self) -> None:
"""Completes the setup for logging data.
Parameters
----------
which : str
One of "events" or "operations". For creating event logs or operational
status logs.
"""
logging.basicConfig(
format="%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s",
filename=self.env.operations_log_fname,
filemode="w",
level=logging.DEBUG,
)
self._operations_logger = logging.getLogger(__name__)
def _create_graph_layout(self, windfarm_layout: str) -> None:
"""Creates a network layout of the windfarm start from the substation(s) to
be able to capture downstream turbines that can be cut off in the event of a cable failure.
Parameters
----------
windfarm_layout : str
Filename to use for reading in the windfarm layout; must be a csv file.
"""
layout_path = str(self.env.data_dir / "windfarm" / windfarm_layout)
layout = (
|
pd.read_csv(layout_path)
|
pandas.read_csv
|
from argparse import ArgumentParser
import os
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
RT_PERCENTILES = {
"Rt_2_5": 2.5,
"Rt_10": 10,
"Rt_20": 20,
"Rt_25": 25,
"Rt_30": 30,
"Rt_40": 40,
"Rt_50": 50,
"Rt_60": 60,
"Rt_70": 70,
"Rt_75": 75,
"Rt_80": 80,
"Rt_90": 90,
"Rt_97_5": 97.5,
}
CASE_PERCENTILES = {
"C_025": 2.5,
"C_25": 25,
"C_50": 50,
"C_75": 75,
"C_975": 97.5,
}
def get_area_code(fpath):
fname = os.path.split(fpath)[-1]
return int(fname.split("_")[0])
def make_dfs(
fpaths, region_codes, percs_dct, start_date, weeks_modelled, forecast_days
):
start_ts = pd.Timestamp(start_date)
end_ts = (
start_ts
+ pd.Timedelta(weeks_modelled, unit="W")
+
|
pd.Timedelta(forecast_days - 1, unit="D")
|
pandas.Timedelta
|
import requests
import datetime
import xml.etree.ElementTree as ET
from dateutil.relativedelta import relativedelta
import pandas as pd
import pathlib
import os
from bs4 import BeautifulSoup
ENDPOINT = "https://ssl.orpak.com/api40/TrackTecPublic/PublicService.asmx/ExecuteCommand"
cur_date = datetime.datetime.now().date()
start_date = cur_date -relativedelta(years=1)
params = """
<Paramaters>
<ClientID></ClientID>
<CommandName>GetEventsHistory</CommandName>
<ResultType>DEFAULT</ResultType>
<DeviceIDs>81B16GBD5D00251</DeviceIDs>
<SourceIDs>9,10,11,12,49,48,52</SourceIDs>
<StartDate>{}</StartDate>
<EndDate>{}</EndDate>
<PageIndex>1</PageIndex>
<PageSize>10000</PageSize>
</Paramaters>
"""
def querylist_builder():
ret = [] # make an empty list to start throwing stuff onto
q_start_date = start_date
while q_start_date < cur_date:
query_date = q_start_date.strftime("%Y/%m/%d") + " 00:00:00"
end_date = (q_start_date + relativedelta(days=1)).strftime("%Y/%m/%d") + " 00:00:00"
ret.append(params.format(query_date, end_date))
q_start_date += relativedelta(days=1)
return ret
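# Illustrative sketch (not part of the original script): each query returned by
# querylist_builder covers a single day, with the start and end dates substituted
# into the XML template above via str.format.
def _demo_querylist_builder():
    queries = querylist_builder()
    print(len(queries), 'daily queries, first one:')
    print(queries[0])
    return queries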
def extract():
"""
Extracts and saves info for all queries in querylist_builder
to a /tmp folder
"""
queries = querylist_builder()
pathlib.Path('/tmp/street_data').mkdir(parents=True, exist_ok=True)
for i,q in enumerate(queries):
print("running extract query")
url = ENDPOINT + "?CommandData=" + q
print(url)
r = requests.get(url)
text_file = open("/tmp/street_data/" + str(i) + ".xml", 'w')
data = r.text
print(data)
text_file.write(data)
print("data saved for {}".format(str(i)))
text_file.close()
def parse():
"""
extract all the lat longs and elements, return as list
"""
values = []
for file in os.listdir('/tmp/street_data/'):
with open('/tmp/street_data/' + file, 'r') as f:
data = f.readlines()
data = ''.join(data)
soup = BeautifulSoup(data)
tables = soup.findAll('table')
for table in tables:
print(table)
time = table.find('eventtime')
lat = table.find('latitude')
long = table.find('longitude')
values.append({'lat': lat, 'long': long, 'time': time})
print(lat,long,time)
return values
def load(values):
"""
load into some format for later study
values = a list of values to load
"""
import sqlite3
conn = sqlite3.connect('./example.db')
df =
|
pd.DataFrame(values)
|
pandas.DataFrame
|
import logging
import math
import os
import pandas as pd
import pandas._libs.json as ujson
from cirrocumulus.diff_exp import DE
from .data_processing import get_filter_str, get_mask
from .envir import CIRRO_SERVE, CIRRO_MAX_WORKERS, CIRRO_DATABASE_CLASS, CIRRO_JOB_RESULTS, CIRRO_JOB_TYPE
from .fdr import fdrcorrection
from .util import create_instance, add_dataset_providers, get_fs, import_path, open_file
executor = None
job_id_2_future = dict()
logger = logging.getLogger('cirro')
def save_job_result_to_file(result, job_id):
new_result = dict()
new_result['content-type'] = result.pop('content-type')
if new_result['content-type'] == 'application/json':
new_result['content-encoding'] = 'gzip'
url = os.path.join(os.environ[CIRRO_JOB_RESULTS], str(job_id) + '.json.gz')
with open_file(url, 'wt', compression='gzip') as out:
out.write(ujson.dumps(result, double_precision=2, orient='values'))
elif new_result['content-type'] == 'application/h5ad':
url = os.path.join(os.environ[CIRRO_JOB_RESULTS], str(job_id) + '.h5ad')
with get_fs(url).open(url, 'wb') as out:
result['data'].write(out)
elif new_result['content-type'] == 'application/zarr':
url = os.path.join(os.environ[CIRRO_JOB_RESULTS], str(job_id) + '.zarr')
result['data'].write_zarr(get_fs(url).get_mapper(url))
elif new_result['content-type'] == 'application/parquet':
import pyarrow.parquet as pq
import pyarrow as pa
url = os.path.join(os.environ[CIRRO_JOB_RESULTS], str(job_id) + '.parquet')
pq.write_table(pa.Table.from_pandas(result['data']), url, filesystem=get_fs(url))
else:
raise ValueError('Unknown content-type {}'.format(new_result['content-type']))
new_result['url'] = url
return new_result
def delete_job(job_id):
future = job_id_2_future.get(job_id)
if future is not None and not future.done():
del job_id_2_future[job_id]
future.cancel()
logger.info('Cancel job {}'.format(job_id))
def done_callback(future):
for job_id in list(job_id_2_future.keys()):
if job_id_2_future[job_id] == future:
del job_id_2_future[job_id]
logger.info('Job {} done'.format(job_id))
break
def submit_job(database_api, dataset_api, email, dataset, job_name, job_type, params):
global executor
is_serve = os.environ.get(CIRRO_SERVE) == 'true'
if executor is None:
max_workers = int(os.environ.get(CIRRO_MAX_WORKERS, '2' if is_serve else '1'))
if max_workers > 0:
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
executor = ProcessPoolExecutor(max_workers=max_workers, mp_context=multiprocessing.get_context(
'spawn')) if is_serve else ThreadPoolExecutor(
max_workers=max_workers)
job_id = database_api.create_job(email=email, dataset_id=dataset['id'], job_name=job_name, job_type=job_type,
params=params)
if executor is not None:
future = executor.submit(run_job, email, job_id, job_type, dataset, params,
database_api if not is_serve else None,
dataset_api if not is_serve else None)
future.add_done_callback(done_callback)
job_id_2_future[job_id] = future
else:
run_job(email, job_id, job_type, dataset, params, database_api if not is_serve else None,
dataset_api if not is_serve else None)
return job_id
def get_obs(dataset_api, dataset, dataset_info, params):
obs_fields = params.get('obs')
if obs_fields is not None:
obs = dataset_api.read_dataset(keys=dict(obs=obs_fields), dataset=dataset).obs
obs_field = obs_fields[0]
if len(obs_fields) > 1:
# combine into one field
obs_field = '_'.join(obs_fields)
obs[obs_field] = obs[obs_fields[0]].astype(str)
for i in range(1, len(obs_fields)):
obs[obs_field] += '_' + obs[obs_fields[i]].astype(str)
obs[obs_field] = obs[obs_field].astype('category')
return obs, obs_field
else:
filters = [params['filter'], params['filter2']]
filter_names = [get_filter_str(params['filter']), get_filter_str(params['filter2'])]
for i in range(len(filter_names)):
if filter_names[i] is None:
filter_names[i] = 'group_' + str(i + 1)
obs = pd.DataFrame(index=
|
pd.RangeIndex(dataset_info['shape'][0])
|
pandas.RangeIndex
|
import pandas as pd
from langdetect import detect
from googletrans import Translator
from Scraping_tools import create_table_artist_link_lyrics
from Scraping_tools import add_text_Lyrics_column
import os
def get_language(x):
"""Main language in a text"""
return detect(x)
def get_len(x):
"""len of string"""
return len(x)
def clean_table_add_language(df):
"""Function to remove links without lyrics
and add a column with the main language
of the database"""
# Remove songs without Lyrics on the html page
df.dropna(inplace=True)
# Add a column with string length
df['Length']=df['Text Lyrics'].apply(get_len)
# Remove lyrics with less than 50 letters
indexnames = df[df['Length'] < 50].index
df.drop(indexnames,inplace = True)
# Detect main language of each song and label into a new column
df['Main Language'] = df['Text Lyrics'].apply(get_language)
return df
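# Illustrative sketch (not part of the original module): labels each lyric with
# its detected language and drops entries shorter than 50 characters, as
# clean_table_add_language does. The toy lyrics are made up.
def _demo_clean_table_add_language():
    df = pd.DataFrame({'Text Lyrics': [
        'This is a long enough English lyric to pass the fifty character filter.',
        'Dette er en lang nok norsk tekst til å komme forbi femtitegnsfilteret.']})
    # Adds 'Length' and 'Main Language' columns (likely 'en' and 'no' here).
    return clean_table_add_language(df)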
def translate_to_english(text_in):
"""This function will convert a string
in english"""
trans = Translator()
translation = trans.translate(text_in)
text_english = translation.text
return text_english
def update_dataframe_to_english(df):
"""This function will convert the lyrics
text not in english oto english"""
df['English Translation Lyrics'] = ''
mask = (df['Main Language'] != 'en')
df.loc[mask,'English Translation Lyrics'] = df.loc[mask,'Text Lyrics'].apply(translate_to_english)
return df
def create_database_save_to_disk(artist, data_folder):
"""This function takes the artist's name as input,
create a database of its songs and lyrics and
save it to disk"""
#Create table with all songs title and url to lyrics
df_artist = create_table_artist_link_lyrics(artist)
#Add a column with clean lyrics for each song
df_artist = add_text_Lyrics_column(df_artist)
print(f'Done converting to text for {artist}')
#Clean the table and add language label for each song
df_artist = clean_table_add_language(df_artist)
print(f'Done clean table for {artist}')
#Update non english songs (comment if not relevant )
df_artist = update_dataframe_to_english(df_artist)
print(f'Done translating for {artist}')
#Save to disk
path_artist = os.path.join(data_folder,f'{artist}_songs.csv')
df_artist.to_csv(path_artist,index=False)
print(f'Done save to disk for {artist}')
return
def merge_databases_into_one(list_artist, data_folder, file_name_to_save):
"""This function takes a list of artist as input,
merge all the songs into one database and
save it to disk"""
df_merge =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 15:27:23 2020
@author: saksh
Main execution file for the market_networks paper; recommended to use market_networks(phase_3).ipynb for a more thorough analysis
Adjust the file path in import_csv according to position of file
"""
#init
import pandas as pd
import numpy as np
np.random.seed(1337) #random state used throughout the notebook for reproducibility
from math import log
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import seaborn as sns
from datetime import datetime
import networkx as nx
import community as louvain
from collections import Counter
import random
from preprocess_funcs import louvain_community, variation_of_information, pd_fill_diagonal
plt.style.use('classic')
#dataset import
sp500 =
|
pd.read_csv('/content/drive/My Drive/collab_files/^GSPC.csv', header = 0, index_col = 'Date')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from shapely.geometry import Point
from mfsetup.fileio import check_source_files
from mfsetup.grid import get_ij
def read_observation_data(f=None, column_info=None,
column_mappings=None):
df = pd.read_csv(f)
df.columns = [s.lower() for s in df.columns]
df['file'] = f
xcol = column_info.get('x_location_col', 'x')
ycol = column_info.get('y_location_col', 'y')
obstype_col = column_info.get('obstype_col', 'obs_type')
rename = {xcol: 'x',
ycol: 'y',
}
if obstype_col is not None:
rename.update({obstype_col.lower(): 'obs_type'})
print(' observation type col: {}'.format(obstype_col))
else:
print(' no observation type col specified; observations assumed to be heads')
if column_mappings is not None:
for k, v in column_mappings.items():
if not isinstance(v, list):
v = [v]
for vi in v:
rename.update({vi.lower(): k.lower()})
if vi in df.columns:
print(' observation label column: {}'.format(vi))
if xcol is None or xcol.lower() not in rename: # df.columns:
raise ValueError("Column {} not in {}; need to specify x_location_col in config file"
.format(xcol, f))
else:
print(' x location col: {}'.format(xcol))
if ycol is None or ycol.lower() not in rename: # df.columns:
raise ValueError("Column {} not in {}; need to specify y_location_col in config file"
.format(ycol, f))
else:
print(' y location col: {}'.format(ycol))
df.rename(columns=rename, inplace=True)
return df
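# Illustrative sketch (not part of the original module): reads a tiny
# observation table from an in-memory CSV and shows how column_info and
# column_mappings rename the coordinate and label columns. All names are made up.
def _demo_read_observation_data():
    import io
    csv = io.StringIO("site,easting,northing,obs_type\n"
                      "w1,500000.0,250000.0,GW\n")
    column_info = {'x_location_col': 'easting',
                   'y_location_col': 'northing',
                   'obstype_col': 'obs_type'}
    # 'easting'/'northing' become 'x'/'y'; 'site' becomes 'obsname'.
    return read_observation_data(f=csv, column_info=column_info,
                                 column_mappings={'obsname': 'site'})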
def setup_head_observations(model, obs_info_files=None,
format='hyd',
obsname_column='obsname'):
self = model
package = format
source_data_config = self.cfg[package]['source_data']
# set a 14 character obsname limit for the hydmod package
# https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/index.html?hyd.htm
# 40 character limit for MODFLOW-6 (see IO doc)
obsname_character_limit = 40
if format == 'hyd':
obsname_character_limit = 14
# TODO: read head observation data using TabularSourceData instead
if obs_info_files is None:
for key in 'filename', 'filenames':
if key in source_data_config:
obs_info_files = source_data_config[key]
if obs_info_files is None:
print("No data for the Observation (OBS) utility.")
return
# get obs_info_files into dictionary format
# filename: dict of column names mappings
if isinstance(obs_info_files, str):
obs_info_files = [obs_info_files]
if isinstance(obs_info_files, list):
obs_info_files = {f: self.cfg[package]['default_columns']
for f in obs_info_files}
elif isinstance(obs_info_files, dict):
for k, v in obs_info_files.items():
if v is None:
obs_info_files[k] = self.cfg[package]['default_columns']
check_source_files(obs_info_files.keys())
# dictionaries mapping from obstypes to hydmod input
pckg = {'LK': 'BAS', # head package for high-K lakes; lake package lakes get dropped
'GW': 'BAS',
'head': 'BAS',
'lake': 'BAS',
'ST': 'SFR',
'flux': 'SFR'
}
arr = {'LK': 'HD', # head package for high-K lakes; lake package lakes get dropped
'GW': 'HD',
'ST': 'SO',
'flux': 'SO'
}
print('Reading observation files...')
dfs = []
for f, column_info in obs_info_files.items():
print(f)
column_mappings = self.cfg[package]['source_data'].get('column_mappings')
df = read_observation_data(f, column_info,
column_mappings=column_mappings)
if 'obs_type' in df.columns and 'pckg' not in df.columns:
df['pckg'] = [pckg.get(s, 'BAS') for s in df['obs_type']]
elif 'pckg' not in df.columns:
df['pckg'] = 'BAS' # default to getting heads
if 'obs_type' in df.columns and 'intyp' not in df.columns:
df['arr'] = [arr.get(s, 'HD') for s in df['obs_type']]
elif 'arr' not in df.columns:
df['arr'] = 'HD'
df['intyp'] = ['I' if p == 'BAS' else 'C' for p in df['pckg']]
df[obsname_column] = df[obsname_column].astype(str).str.lower()
dfs.append(df[['pckg', 'arr', 'intyp', 'x', 'y', obsname_column, 'file']])
df =
|
pd.concat(dfs, axis=0)
|
pandas.concat
|
"""
.. module:: trend
:synopsis: Trend Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin, ema, get_min_max
class AroonIndicator(IndicatorMixin):
"""Aroon Indicator
Identify when trends are likely to change direction.
Aroon Up = ((N - Days Since N-day High) / N) x 100
Aroon Down = ((N - Days Since N-day Low) / N) x 100
Aroon Indicator = Aroon Up - Aroon Down
https://www.investopedia.com/terms/a/aroon.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 25, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
rolling_close = self._close.rolling(self._n, min_periods=0)
self._aroon_up = rolling_close.apply(
lambda x: float(np.argmax(x) + 1) / self._n * 100, raw=True)
self._aroon_down = rolling_close.apply(
lambda x: float(np.argmin(x) + 1) / self._n * 100, raw=True)
def aroon_up(self) -> pd.Series:
"""Aroon Up Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_up = self._check_fillna(self._aroon_up, value=0)
return pd.Series(aroon_up, name=f'aroon_up_{self._n}')
def aroon_down(self) -> pd.Series:
"""Aroon Down Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_down = self._check_fillna(self._aroon_down, value=0)
return pd.Series(aroon_down, name=f'aroon_down_{self._n}')
def aroon_indicator(self) -> pd.Series:
"""Aroon Indicator
Returns:
pandas.Series: New feature generated.
"""
aroon_diff = self._aroon_up - self._aroon_down
aroon_diff = self._check_fillna(aroon_diff, value=0)
return pd.Series(aroon_diff, name=f'aroon_ind_{self._n}')
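# Illustrative usage sketch (not part of the library): computes the Aroon
# channels and oscillator for a short, made-up close-price series.
def _demo_aroon_indicator():
    close = pd.Series([1.0, 2.0, 3.0, 2.5, 2.8, 3.5, 3.2, 3.8, 4.0, 3.9])
    aroon = AroonIndicator(close=close, n=5)
    return pd.concat([aroon.aroon_up(), aroon.aroon_down(),
                      aroon.aroon_indicator()], axis=1)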
class MACD(IndicatorMixin):
"""Moving Average Convergence Divergence (MACD)
Is a trend-following momentum indicator that shows the relationship between
two moving averages of prices.
https://school.stockcharts.com/doku.php?id=technical_indicators:moving_average_convergence_divergence_macd
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
close: pd.Series,
n_slow: int = 26,
n_fast: int = 12,
n_sign: int = 9,
fillna: bool = False):
self._close = close
self._n_slow = n_slow
self._n_fast = n_fast
self._n_sign = n_sign
self._fillna = fillna
self._run()
def _run(self):
self._emafast = ema(self._close, self._n_fast, self._fillna)
self._emaslow = ema(self._close, self._n_slow, self._fillna)
self._macd = self._emafast - self._emaslow
self._macd_signal = ema(self._macd, self._n_sign, self._fillna)
self._macd_diff = self._macd - self._macd_signal
def macd(self) -> pd.Series:
"""MACD Line
Returns:
pandas.Series: New feature generated.
"""
macd = self._check_fillna(self._macd, value=0)
return pd.Series(macd, name=f'MACD_{self._n_fast}_{self._n_slow}')
def macd_signal(self) -> pd.Series:
"""Signal Line
Returns:
pandas.Series: New feature generated.
"""
macd_signal = self._check_fillna(self._macd_signal, value=0)
return pd.Series(macd_signal, name=f'MACD_sign_{self._n_fast}_{self._n_slow}')
def macd_diff(self) -> pd.Series:
"""MACD Histogram
Returns:
pandas.Series: New feature generated.
"""
macd_diff = self._check_fillna(self._macd_diff, value=0)
return pd.Series(macd_diff, name=f'MACD_diff_{self._n_fast}_{self._n_slow}')
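# Illustrative usage sketch (not part of the library): computes the MACD line,
# signal line and histogram for a made-up close-price series.
def _demo_macd():
    close = pd.Series(np.linspace(100.0, 120.0, 60))
    macd = MACD(close=close, n_slow=26, n_fast=12, n_sign=9)
    return pd.concat([macd.macd(), macd.macd_signal(), macd.macd_diff()], axis=1)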
class EMAIndicator(IndicatorMixin):
"""EMA - Exponential Moving Average
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 14, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
def ema_indicator(self) -> pd.Series:
"""Exponential Moving Average (EMA)
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(self._close, self._n, self._fillna)
return pd.Series(ema_, name=f'ema_{self._n}')
class TRIXIndicator(IndicatorMixin):
"""Trix (TRIX)
Shows the percent rate of change of a triple exponentially smoothed moving
average.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 15, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
ema1 = ema(self._close, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
ema3 = ema(ema2, self._n, self._fillna)
self._trix = (ema3 - ema3.shift(1, fill_value=ema3.mean())) / ema3.shift(1, fill_value=ema3.mean())
self._trix *= 100
def trix(self) -> pd.Series:
"""Trix (TRIX)
Returns:
pandas.Series: New feature generated.
"""
trix = self._check_fillna(self._trix, value=0)
return pd.Series(trix, name=f'trix_{self._n}')
class MassIndex(IndicatorMixin):
"""Mass Index (MI)
It uses the high-low range to identify trend reversals based on range
expansions. It identifies range bulges that can foreshadow a reversal of
the current trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:mass_index
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n(int): n low period.
n2(int): n high period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, n: int = 9, n2: int = 25, fillna: bool = False):
self._high = high
self._low = low
self._n = n
self._n2 = n2
self._fillna = fillna
self._run()
def _run(self):
amplitude = self._high - self._low
ema1 = ema(amplitude, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
mass = ema1 / ema2
self._mass = mass.rolling(self._n2, min_periods=0).sum()
def mass_index(self) -> pd.Series:
"""Mass Index (MI)
Returns:
pandas.Series: New feature generated.
"""
mass = self._check_fillna(self._mass, value=0)
return pd.Series(mass, name=f'mass_index_{self._n}_{self._n2}')
class IchimokuIndicator(IndicatorMixin):
"""Ichimoku Kinkō Hyō (Ichimoku)
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n1(int): n1 low period.
n2(int): n2 medium period.
n3(int): n3 high period.
visual(bool): if True, shift n2 values.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, n1: int = 9, n2: int = 26, n3: int = 52,
visual: bool = False, fillna: bool = False):
self._high = high
self._low = low
self._n1 = n1
self._n2 = n2
self._n3 = n3
self._visual = visual
self._fillna = fillna
def ichimoku_a(self) -> pd.Series:
"""Senkou Span A (Leading Span A)
Returns:
pandas.Series: New feature generated.
"""
conv = 0.5 * (self._high.rolling(self._n1, min_periods=0).max()
+ self._low.rolling(self._n1, min_periods=0).min())
base = 0.5 * (self._high.rolling(self._n2, min_periods=0).max()
+ self._low.rolling(self._n2, min_periods=0).min())
spana = 0.5 * (conv + base)
spana = spana.shift(self._n2, fill_value=spana.mean()) if self._visual else spana
spana = self._check_fillna(spana, value=-1)
return pd.Series(spana, name=f'ichimoku_a_{self._n1}_{self._n2}')
def ichimoku_b(self) -> pd.Series:
"""Senkou Span B (Leading Span B)
Returns:
pandas.Series: New feature generated.
"""
spanb = 0.5 * (self._high.rolling(self._n3, min_periods=0).max()
+ self._low.rolling(self._n3, min_periods=0).min())
spanb = spanb.shift(self._n2, fill_value=spanb.mean()) if self._visual else spanb
spanb = self._check_fillna(spanb, value=-1)
return pd.Series(spanb, name=f'ichimoku_b_{self._n1}_{self._n2}')
class KSTIndicator(IndicatorMixin):
"""KST Oscillator (KST Signal)
It is useful for identifying major stock market cycle junctures because its
formula is weighted to be more greatly influenced by the longer and more
dominant time spans, in order to better reflect the primary swings of the
stock market cycle.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst
Args:
close(pandas.Series): dataset 'Close' column.
r1(int): r1 period.
r2(int): r2 period.
r3(int): r3 period.
r4(int): r4 period.
n1(int): n1 smoothed period.
n2(int): n2 smoothed period.
n3(int): n3 smoothed period.
n4(int): n4 smoothed period.
nsig(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, r1: int = 10, r2: int = 15, r3: int = 20, r4: int = 30,
n1: int = 10, n2: int = 10, n3: int = 10, n4: int = 15, nsig: int = 9,
fillna: bool = False):
self._close = close
self._r1 = r1
self._r2 = r2
self._r3 = r3
self._r4 = r4
self._n1 = n1
self._n2 = n2
self._n3 = n3
self._n4 = n4
self._nsig = nsig
self._fillna = fillna
self._run()
def _run(self):
rocma1 = ((self._close - self._close.shift(self._r1, fill_value=self._close.mean()))
/ self._close.shift(self._r1, fill_value=self._close.mean())).rolling(self._n1, min_periods=0).mean()
rocma2 = ((self._close - self._close.shift(self._r2, fill_value=self._close.mean()))
/ self._close.shift(self._r2, fill_value=self._close.mean())).rolling(self._n2, min_periods=0).mean()
rocma3 = ((self._close - self._close.shift(self._r3, fill_value=self._close.mean()))
/ self._close.shift(self._r3, fill_value=self._close.mean())).rolling(self._n3, min_periods=0).mean()
rocma4 = ((self._close - self._close.shift(self._r4, fill_value=self._close.mean()))
/ self._close.shift(self._r4, fill_value=self._close.mean())).rolling(self._n4, min_periods=0).mean()
self._kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
self._kst_sig = self._kst.rolling(self._nsig, min_periods=0).mean()
def kst(self) -> pd.Series:
"""Know Sure Thing (KST)
Returns:
pandas.Series: New feature generated.
"""
kst = self._check_fillna(self._kst, value=0)
return pd.Series(kst, name='kst')
def kst_sig(self) -> pd.Series:
"""Signal Line Know Sure Thing (KST)
nsig-period SMA of KST
Returns:
pandas.Series: New feature generated.
"""
kst_sig = self._check_fillna(self._kst_sig, value=0)
return pd.Series(kst_sig, name='kst_sig')
def kst_diff(self) -> pd.Series:
"""Diff Know Sure Thing (KST)
KST - Signal_KST
Returns:
pandas.Series: New feature generated.
"""
kst_diff = self._kst - self._kst_sig
kst_diff = self._check_fillna(kst_diff, value=0)
return pd.Series(kst_diff, name='kst_diff')
class DPOIndicator(IndicatorMixin):
"""Detrended Price Oscillator (DPO)
Is an indicator designed to remove trend from price and make it easier to
identify cycles.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:detrended_price_osci
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 20, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
self._dpo = (self._close.shift(int((0.5 * self._n) + 1), fill_value=self._close.mean())
- self._close.rolling(self._n, min_periods=0).mean())
def dpo(self) -> pd.Series:
"""Detrended Price Oscillator (DPO)
Returns:
pandas.Series: New feature generated.
"""
dpo = self._check_fillna(self._dpo, value=0)
return pd.Series(dpo, name='dpo_'+str(self._n))
class CCIIndicator(IndicatorMixin):
"""Commodity Channel Index (CCI)
CCI measures the difference between a security's price change and its
average price change. High positive readings indicate that prices are well
above their average, which is a show of strength. Low negative readings
indicate that prices are well below their average, which is a show of
weakness.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
c(int): constant.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
n: int = 20,
c: float = 0.015,
fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._c = c
self._fillna = fillna
self._run()
def _run(self):
def _mad(x):
return np.mean(np.abs(x-np.mean(x)))
pp = (self._high + self._low + self._close) / 3.0
self._cci = ((pp - pp.rolling(self._n, min_periods=0).mean())
/ (self._c * pp.rolling(self._n, min_periods=0).apply(_mad, True)))
def cci(self) -> pd.Series:
"""Commodity Channel Index (CCI)
Returns:
pandas.Series: New feature generated.
"""
cci = self._check_fillna(self._cci, value=0)
return pd.Series(cci, name='cci')
class ADXIndicator(IndicatorMixin):
"""Average Directional Movement Index (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to
collectively as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
assert self._n != 0, "N may not be 0 and is %r" % self._n
cs = self._close.shift(1)
pdm = get_min_max(self._high, cs, 'max')
pdn = get_min_max(self._low, cs, 'min')
tr = pdm - pdn
self._trs_initial = np.zeros(self._n-1)
self._trs = np.zeros(len(self._close) - (self._n - 1))
self._trs[0] = tr.dropna()[0:self._n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(self._trs)-1):
self._trs[i] = self._trs[i-1] - (self._trs[i-1]/float(self._n)) + tr[self._n+i]
up = self._high - self._high.shift(1)
dn = self._low.shift(1) - self._low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
self._dip = np.zeros(len(self._close) - (self._n - 1))
self._dip[0] = pos.dropna()[0:self._n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(self._dip)-1):
self._dip[i] = self._dip[i-1] - (self._dip[i-1]/float(self._n)) + pos[self._n+i]
self._din = np.zeros(len(self._close) - (self._n - 1))
self._din[0] = neg.dropna()[0:self._n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(self._din)-1):
self._din[i] = self._din[i-1] - (self._din[i-1]/float(self._n)) + neg[self._n+i]
def adx(self) -> pd.Series:
"""Average Directional Index (ADX)
Returns:
pandas.Series: New feature generated.
"""
dip = np.zeros(len(self._trs))
for i in range(len(self._trs)):
dip[i] = 100 * (self._dip[i]/self._trs[i])
din = np.zeros(len(self._trs))
for i in range(len(self._trs)):
din[i] = 100 * (self._din[i]/self._trs[i])
dx = 100 * np.abs((dip - din) / (dip + din))
adx = np.zeros(len(self._trs))
adx[self._n] = dx[0:self._n].mean()
for i in range(self._n+1, len(adx)):
adx[i] = ((adx[i-1] * (self._n - 1)) + dx[i-1]) / float(self._n)
adx = np.concatenate((self._trs_initial, adx), axis=0)
self._adx = pd.Series(data=adx, index=self._close.index)
adx = self._check_fillna(self._adx, value=20)
return pd.Series(adx, name='adx')
def adx_pos(self) -> pd.Series:
"""Plus Directional Indicator (+DI)
Returns:
pandas.Series: New feature generated.
"""
dip = np.zeros(len(self._close))
for i in range(1, len(self._trs)-1):
dip[i+self._n] = 100 * (self._dip[i]/self._trs[i])
adx_pos = self._check_fillna(pd.Series(dip, index=self._close.index), value=20)
return pd.Series(adx_pos, name='adx_pos')
def adx_neg(self) -> pd.Series:
"""Minus Directional Indicator (-DI)
Returns:
pandas.Series: New feature generated.
"""
din = np.zeros(len(self._close))
for i in range(1, len(self._trs)-1):
din[i+self._n] = 100 * (self._din[i]/self._trs[i])
adx_neg = self._check_fillna(pd.Series(din, index=self._close.index), value=20)
return pd.Series(adx_neg, name='adx_neg')
class VortexIndicator(IndicatorMixin):
"""Vortex Indicator (VI)
It consists of two oscillators that capture positive and negative trend
movement. A bullish signal triggers when the positive trend indicator
crosses above the negative trend indicator or a key level.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
tr = (self._high.combine(self._close.shift(1, fill_value=self._close.mean()), max)
- self._low.combine(self._close.shift(1, fill_value=self._close.mean()), min))
trn = tr.rolling(self._n).sum()
vmp = np.abs(self._high - self._low.shift(1))
vmm = np.abs(self._low - self._high.shift(1))
self._vip = vmp.rolling(self._n, min_periods=0).sum() / trn
self._vin = vmm.rolling(self._n, min_periods=0).sum() / trn
def vortex_indicator_pos(self):
"""+VI
Returns:
pandas.Series: New feature generated.
"""
vip = self._check_fillna(self._vip, value=1)
return pd.Series(vip, name='vip')
def vortex_indicator_neg(self):
"""-VI
Returns:
pandas.Series: New feature generated.
"""
vin = self._check_fillna(self._vin, value=1)
return pd.Series(vin, name='vin')
def vortex_indicator_diff(self):
"""Diff VI
Returns:
pandas.Series: New feature generated.
"""
vid = self._vip - self._vin
vid = self._check_fillna(vid, value=0)
return pd.Series(vid, name='vid')
class PSARIndicator(IndicatorMixin):
"""Parabolic Stop and Reverse (Parabolic SAR)
The Parabolic Stop and Reverse, more commonly known as the
Parabolic SAR,is a trend-following indicator developed by
<NAME>. The Parabolic SAR is displayed as a single
parabolic line (or dots) underneath the price bars in an uptrend,
and above the price bars in a downtrend.
https://school.stockcharts.com/doku.php?id=technical_indicators:parabolic_sar
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
step(float): the Acceleration Factor used to compute the SAR.
max_step(float): the maximum value allowed for the Acceleration Factor.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series,
step: float = 0.02, max_step: float = 0.20,
fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._step = step
self._max_step = max_step
self._fillna = fillna
self._run()
def _run(self):
up_trend = True
af = self._step
up_trend_high = self._high.iloc[0]
down_trend_low = self._low.iloc[0]
self._psar = self._close.copy()
self._psar_up = pd.Series(index=self._psar.index)
self._psar_down = pd.Series(index=self._psar.index)
for i in range(2, len(self._close)):
reversal = False
max_high = self._high.iloc[i]
min_low = self._low.iloc[i]
if up_trend:
self._psar.iloc[i] = self._psar.iloc[i-1] + (
af * (up_trend_high - self._psar.iloc[i-1]))
if min_low < self._psar.iloc[i]:
reversal = True
self._psar.iloc[i] = up_trend_high
down_trend_low = min_low
af = self._step
else:
if max_high > up_trend_high:
up_trend_high = max_high
af = min(af + self._step, self._max_step)
l1 = self._low.iloc[i-1]
l2 = self._low.iloc[i-2]
if l2 < self._psar.iloc[i]:
self._psar.iloc[i] = l2
elif l1 < self._psar.iloc[i]:
self._psar.iloc[i] = l1
else:
self._psar.iloc[i] = self._psar.iloc[i-1] - (
af * (self._psar.iloc[i-1] - down_trend_low))
if max_high > self._psar.iloc[i]:
reversal = True
self._psar.iloc[i] = down_trend_low
up_trend_high = max_high
af = self._step
else:
if min_low < down_trend_low:
down_trend_low = min_low
af = min(af + self._step, self._max_step)
h1 = self._high.iloc[i-1]
h2 = self._high.iloc[i-2]
if h2 > self._psar.iloc[i]:
self._psar.iloc[i] = h2
elif h1 > self._psar.iloc[i]:
self._psar.iloc[i] = h1
up_trend = up_trend != reversal # XOR
if up_trend:
self._psar_up.iloc[i] = self._psar.iloc[i]
else:
self._psar_down.iloc[i] = self._psar.iloc[i]
def psar(self) -> pd.Series:
"""PSAR value
Returns:
pandas.Series: New feature generated.
"""
psar = self._check_fillna(self._psar, value=-1)
return pd.Series(psar, name='psar')
def psar_up(self) -> pd.Series:
"""PSAR up trend value
Returns:
pandas.Series: New feature generated.
"""
psar_up = self._check_fillna(self._psar_up, value=-1)
return pd.Series(psar_up, name='psarup')
def psar_down(self) -> pd.Series:
"""PSAR down trend value
Returns:
pandas.Series: New feature generated.
"""
psar_down = self._check_fillna(self._psar_down, value=-1)
return
|
pd.Series(psar_down, name='psardown')
|
pandas.Series
|
import pandas as pd
import pytest
from nibabel.tmpdirs import InTemporaryDirectory
from nilearn._utils.stats import _check_events_file_uses_tab_separators
def make_data_for_test_runs():
data_for_temp_datafile = [
['csf', 'constant', 'linearTrend', 'wm'],
[13343.032102491035, 1.0, 0.0, 9486.199545677482],
[13329.224068063204, 1.0, 1.0, 9497.003324892803],
[13291.755627241291, 1.0, 2.0, 9484.012965365506],
]
delimiters = {
'tab': '\t',
'comma': ',',
'space': ' ',
'semicolon': ';',
'hyphen': '-',
}
return data_for_temp_datafile, delimiters
def _create_test_file(temp_csv, test_data, delimiter):
test_data = pd.DataFrame(test_data)
test_data.to_csv(temp_csv, sep=delimiter)
def _run_test_for_invalid_separator(filepath, delimiter_name):
if delimiter_name not in ('tab', 'comma'):
with pytest.raises(ValueError):
_check_events_file_uses_tab_separators(events_files=filepath)
else:
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert result is None
def test_for_invalid_separator():
data_for_temp_datafile, delimiters = make_data_for_test_runs()
for delimiter_name, delimiter_char in delimiters.items():
with InTemporaryDirectory():
temp_tsv_file = 'tempfile.{} separated values'.format(
delimiter_name)
_create_test_file(temp_csv=temp_tsv_file,
test_data=data_for_temp_datafile,
delimiter=delimiter_char)
_run_test_for_invalid_separator(filepath=temp_tsv_file,
delimiter_name=delimiter_name)
def test_with_2D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
events_pandas_dataframe = pd.DataFrame(data_for_pandas_dataframe)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert result is None
def test_with_1D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
for dataframe_ in data_for_pandas_dataframe:
events_pandas_dataframe =
|
pd.DataFrame(dataframe_)
|
pandas.DataFrame
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table import expressions as expr, ListView
from pyflink.table.types import DataTypes
from pyflink.table.udf import udf, udtf, udaf, AggregateFunction, TableAggregateFunction, udtaf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase
class RowBasedOperationTests(object):
def test_map(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
func = udf(lambda x: Row(x + 1, x * x), result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())]))
t.map(func(t.b)).alias("a", "b") \
.map(func(t.a)).alias("a", "b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["4,9", "3,4", "7,36", "10,81", "5,16"])
def test_map_with_pandas_udf(self):
t = self.t_env.from_elements(
[(1, Row(2, 3)), (2, Row(1, 3)), (1, Row(5, 4)), (1, Row(8, 6)), (2, Row(3, 4))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b",
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.INT())]))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
def func(x, y):
import pandas as pd
a = (x * 2).rename('b')
res = pd.concat([a, x], axis=1) + y
return res
pandas_udf = udf(func,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
t.map(pandas_udf(t.a, t.b)).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["3,5", "3,7", "6,6", "9,8", "5,8"])
def test_flat_map(self):
t = self.t_env.from_elements(
[(1, "2,3", 3), (2, "1", 3), (1, "5,6,7", 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.STRING()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
@udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
def split(x, string):
for s in string.split(","):
yield x, s
t.flat_map(split(t.a, t.b)) \
.alias("a, b") \
.flat_map(split(t.a, t.b)) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2", "1,3", "2,1", "1,5", "1,6", "1,7"])
class BatchRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkBatchTableTestCase):
def test_aggregate_with_pandas_udaf(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda a: (a.mean(), a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.group_by(t.a) \
.aggregate(pandas_udaf(t.b).alias("c", "d")) \
.select("a, c, d").execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,5.0,8", "2,2.0,3"])
def test_aggregate_with_pandas_udaf_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda a: Row(a.mean(), a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.aggregate(pandas_udaf(t.b).alias("c", "d")) \
.select("c, d").execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["3.8,8"])
def test_window_aggregate_with_pandas_udaf(self):
import datetime
from pyflink.table.window import Tumble
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT(),
DataTypes.INT()
])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda a: (a.mean(), a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
tumble_window = Tumble.over(expr.lit(1).hours) \
.on(expr.col("rowtime")) \
.alias("w")
t.window(tumble_window) \
.group_by("w") \
.aggregate(pandas_udaf(t.b).alias("d", "e")) \
.select("w.rowtime, d, e") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["2018-03-11 03:59:59.999,2.2,3",
"2018-03-11 04:59:59.999,8.0,8"])
class StreamRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkStreamTableTestCase):
def test_aggregate(self):
import pandas as pd
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
function = CountAndSumAggregateFunction()
agg = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
result = t.group_by(t.a) \
.aggregate(agg(t.b).alias("c", "d")) \
.select("a, c, d") \
.to_pandas()
assert_frame_equal(result, pd.DataFrame([[1, 3, 15], [2, 2, 4]], columns=['a', 'c', 'd']))
def test_flat_aggregate(self):
import pandas as pd
self.t_env.register_function("mytop", Top2())
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(5, 'Hi2', 'hi'),
(7, 'Hi', 'Hello'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by("c") \
.flat_aggregate("mytop(a)") \
.select("c, a") \
.flat_aggregate("mytop(a)") \
.select("a") \
.to_pandas()
assert_frame_equal(result,
|
pd.DataFrame([[7], [5]], columns=['a'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pandas as pd
import pandapower as pp
from pandapower.pd2ppc import _pd2ppc
from pandapower.run import rundcpp
from pandas import DataFrame, notnull, isnull
from pandapower.topology import create_nxgraph, connected_component
def estimate_voltage_vector(net):
"""
Function initializes the voltage vector of net with a rough estimation. All buses are set to the
slack bus voltage. Transformer differences in magnitude and phase shifting are accounted for.
:param net: pandapower network
:return: pandas dataframe with estimated vm_pu and va_degree
"""
res_bus =
|
DataFrame(index=net.bus.index, columns=["vm_pu", "va_degree"])
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 12:46:52 2020
Updated on June 3, 2021
@author: jacob
"""
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as optim
import numpy as np
from scipy import stats
#from heatmap import heatmap_gr, heatmap_ymax
#from graph_repl import graph_repls
# How many time points are graphed
XSCALE = 97
def graph_avg(df_dict, con_data, exp_data, con_name, exp_name, data_path, plate_list, hm_flag=False, log_flag=False):
""" Plot Formatting """
# You typically want your plot to be ~1.33x wider than tall.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(10, 7.5))
con_color = "#0466c8"
exp_color = "#d62828"
# Remove the plot frame lines. They are unnecessary
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Set background to white
ax.set_facecolor('white')
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(0, 2)
plt.xlim(0, XSCALE)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(np.arange(0, 1.7, 0.2), [str(round(x, 1)) for x in np.arange(0, 1.7, 0.2)], fontsize=14)
plt.xticks(np.arange(0, XSCALE, 24), [str(round(x,1)) for x in np.arange(0, XSCALE, 24)], fontsize=14)
# Provide tick lines across the plot to help your viewers trace along the axis ticks.
for y in np.arange(0, 1.7, 0.2):
plt.plot(range(0, XSCALE), [y] * len(range(0, XSCALE)), "--", lw=0.5, color="black", alpha=0.3)
""" Calculations """
# Parameter fits for individual control wells
control_grs = []
control_ymaxs = []
# Storing wells to compute average of replicate
control_wells = []
con_avg = con_name + "_avg"
# Lists of parameter values for the experimental replicate
exp_grs = []
exp_ymaxs = []
# Storing wells to compute average of replicate
exp_wells = []
exp_avg = exp_name + "_avg"
avg_df = pd.DataFrame()
# Calculate parameter values for individual wells
for plate_name in con_data.keys():
# Plate
df = df_dict[plate_name]
plat = plate_list[plate_name]
# Wells in that specific plate that belong to given control replicate
wells = con_data[plate_name]
for well in wells:
if well == "":
break
control_wells.append(df[well])
gr, ymax, line = fit_model(df, well)
plat.add_params(gr, ymax, well)
if gr < 2:
control_grs.append(gr)
if ymax < 2:
control_ymaxs.append(ymax)
for plate_name in exp_data.keys():
# Plate
df = df_dict[plate_name]
plat = plate_list[plate_name]
# Wells in that specific plate that belong to given control replicate
wells = exp_data[plate_name]
for well in wells:
if well == "":
break
exp_wells.append(df[well])
gr, ymax, line = fit_model(df, well)
plat.add_params(gr, ymax, well)
if gr < 2:
exp_grs.append(gr)
if ymax < 2:
exp_ymaxs.append(ymax)
avg_df["Time"] = df["Time"]
# Calculate averages for replicates
con_mean, con_std, con_ci = avg_well(control_wells)
avg_df[con_avg] = con_mean
avg_df[con_name + "_std"] = con_std
avg_df[con_name + "_ci"] = con_ci
exp_mean, exp_std, exp_ci = avg_well(exp_wells)
avg_df[exp_avg] = exp_mean
avg_df[exp_name + "_std"] = exp_std
avg_df[exp_name + "_ci"] = exp_ci
# Parameter fits for average control model
con_gr, con_ymax, con_line = fit_model(avg_df, con_avg)
# Parameter fits for average exp model
exp_gr, exp_ymax, exp_line = fit_model(avg_df, exp_avg)
# T-test for growth rate and ymax parameter values
gr_stats = t_test(exp_grs, control_grs)
ymax_stats = t_test(exp_ymaxs, control_ymaxs)
# P-values
gr_pval = gr_stats[1]
ymax_pval = ymax_stats[1]
if con_ymax > 0.01 and exp_ymax > 0.01:
# Normalize experimental parameters with control parameters
gr_ratio = (exp_gr / con_gr)
ymax_ratio = (exp_ymax / con_ymax)
else:
gr_ratio = 0
ymax_ratio = 0
# Symbols on graph to indicate better growth by experimental strain
better_gr = ""
if gr_ratio > 1:
better_gr += "^ "
better_ymax = ""
if ymax_ratio > 1:
better_ymax += "^ "
""" Graphing """
# Graph average experimental line
plt.plot(avg_df["Time"], avg_df[exp_avg], color=exp_color, label=(exp_name), linewidth=3.0)
# plt.plot(*exp_line, 'r', linestyle = "--", color=exp_color, linewidth=1)
# Confidence intervals
exp_ci_hi = avg_df[exp_avg] + avg_df[exp_name + "_ci"]
exp_ci_low = avg_df[exp_avg] - avg_df[exp_name + "_ci"]
plt.plot(avg_df["Time"], exp_ci_hi, color=exp_color, linestyle=":", linewidth=1.5)
plt.plot(avg_df["Time"], exp_ci_low, color=exp_color, linestyle=":", linewidth=1.5)
# Graph average control line
plt.plot(avg_df["Time"], avg_df[con_avg], color=con_color, label=(con_name), linewidth=3.0)
# plt.plot(*con_line, 'r', linestyle = "--", color=con_color, linewidth=1)
# Confidence intervals
con_ci_hi = avg_df[con_avg] + avg_df[con_name + "_ci"]
con_ci_low = avg_df[con_avg] - avg_df[con_name + "_ci"]
plt.plot(avg_df["Time"], con_ci_hi, color=con_color, linestyle=":", linewidth=1.5)
plt.plot(avg_df["Time"], con_ci_low, color=con_color, linestyle=":", linewidth=1.5)
# Plot histograms
# graph_repls(con_grs, con_ymaxs, exp_grs, exp_ymax,con_name, exp_name, data_path)
# Place a legend to the right
lgd = ax.legend(
loc = 'upper right',
borderaxespad = 0.,
facecolor = 'white',
fontsize = 16)
# Format P-values
if gr_pval < 0.001:
gr_pval = "<0.001"
else:
gr_pval = round(gr_pval, 3)
if ymax_pval < 0.001:
ymax_pval = "<0.001"
else:
ymax_pval = round(ymax_pval, 3)
plt.title(f"{exp_name} vs. {con_name}- GR ratio:{round(gr_ratio, 3)} ({gr_pval}) Ymax ratio: {round(ymax_ratio, 3)} ({ymax_pval})", fontsize=20)
path = data_path + "Graphs/Averages/" + exp_name + "_vs_" + con_name
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
# Graph each well of a replicate
def graph_indiv(df_dict, repl_data, repl_name, data_path, plate_list, log_flag=False):
""" Graph Formatting """
# You typically want your plot to be ~1.33x wider than tall.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(10, 7.5))
# Remove the plot frame lines. They are unnecessary
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Set background to white
ax.set_facecolor('white')
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(0, 2)
plt.xlim(0, XSCALE)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(np.arange(0, 1.7, 0.2), [str(round(x, 1)) for x in np.arange(0, 1.7, 0.2)], fontsize=14)
plt.xticks(np.arange(0, XSCALE, 24), [str(round(x,1)) for x in np.arange(0, XSCALE, 24)], fontsize=14)
# Provide tick lines across the plot to help your viewers trace along the axis ticks.
for y in np.arange(0, 1.7, 0.2):
plt.plot(range(0, XSCALE), [y] * len(range(0, XSCALE)), "--", lw=0.5, color="black", alpha=0.3)
# Graph each replicate well
n = 0
for plate_name in repl_data.keys():
# Plate
df = df_dict[plate_name]
plat = plate_list[plate_name]
# Wells in that specific plate that belong to given control replicate
wells = repl_data[plate_name]
# Counter for number of viable wells
n = 0
for well in wells:
if well == "":
break
try:
gr, ymax = plat.get_params(well)
except KeyError:
gr, ymax, line = fit_model(df, well)
plat.add_params(gr, ymax, well)
if ymax > 0.2:
n += 1
plt.plot(df["Time"], df[well], label=well, linewidth=2.5)
# Place a legend to the right
lgd = ax.legend(
loc = 'upper right',
borderaxespad = 0.,
facecolor = 'white',
ncol = 2,
fontsize = 16)
plt.title(f"{repl_name} Isolates: ({n} isolates with growth)", fontsize=24)
path = data_path + "Graphs/" + repl_name
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
""" Auxillary Functions """
# Two-sample t-test on parameter values from individual wells
def t_test(data1, data2):
ind_ttest = stats.ttest_ind(data1, data2)
return ind_ttest
def fit_model(df, well):
# Calculate exponential portion of data for line fit
exp = exponential_section(df, well)
# Fitting lines to exponential portions to graph
slope = 0
if not exp.empty:
slope, line = fit_line(exp, well)
else:
line = [(0,0), (0,0)]
# Fit a logistical model to calculate growth rate
p0 = np.random.exponential(size=3) # Initialize random values
bounds = (0, [10000000., 100., 10000000.]) # Set bounds
# Prepare model
xt = np.array(df["Time"])
yt = np.array(df[well])
# If no logistic curve can be fit, fall back to the less sophisticated method of fitting a line to the exponential section of the graph
try:
# Fit model 1
(a, gr, ymax), cov = optim.curve_fit(logistic, xt, yt, bounds=bounds, p0=p0)
except (RuntimeError, ValueError):
gr = slope
ymax = max(df[well])
return gr, ymax, line
# Estimates the exponential growth section of the growth curve to compute the growth rate
def exponential_section(df, well):
ymax = max(df[well])
ymin = min(df[well])
ymid = (ymax + ymin) / 2.0
span = ymax - ymin
low = ymid - (span * 0.40)
high = ymid + (span * 0.40)
exp = df.loc[(df[well] >= low) & (df[well] <= high)]
return exp[["Time", well]]
# Fits a line to a given section of a graph. Returns the slope and endpoints of the line
def fit_line(exp, well):
exp["Time"] = pd.to_numeric(exp["Time"])
exp[well] = pd.to_numeric(exp[well])
slope, intercept, r_value, p_value, std_err = stats.linregress(exp["Time"], exp[well])
x1 = int(exp.iloc[:1,:1].values)
x2 = int(exp.iloc[-1:,:1].values)
y1 = x1 * slope + intercept
y2 = x2 * slope + intercept
p1 = (x1, x2)
p2 = (y1, y2)
line = [p1, p2]
return slope, line
# Logistic function used to model the growth rate
def logistic(t, a, b, c):
return c / (1 + a * np.exp(-b*t))
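# Added illustration (not part of the original script): a minimal sketch of how the
# logistic model above can be fitted with scipy.optimize.curve_fit to recover a growth
# rate, mirroring what fit_model() does. The synthetic time series and the "true"
# parameter values below are assumptions made purely for demonstration.
def _demo_logistic_fit():
    t = np.linspace(0, 96, 97)                    # hourly time points, like XSCALE
    od = logistic(t, 50.0, 0.15, 1.4)             # synthetic curve (a, b=growth rate, c=ymax)
    p0 = np.random.exponential(size=3)            # random initial guess, as in fit_model
    bounds = (0, [10000000., 100., 10000000.])
    (a, gr, ymax), cov = optim.curve_fit(logistic, t, od, bounds=bounds, p0=p0)
    return gr, ymax                               # expected to be close to 0.15 and 1.4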
# Returns several statistics for group of dataframe columns (mean, SD, 95% CI)
def avg_well(well_list):
col =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Module: Preprocess
# Author: <NAME> <<EMAIL>>
# License: MIT
import pandas as pd
import numpy as np
import ipywidgets as wg
from IPython.display import display
from ipywidgets import Layout
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.impute._base import _BaseImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.cross_decomposition import PLSRegression
from sklearn.manifold import TSNE
from sklearn.decomposition import IncrementalPCA
from sklearn.preprocessing import KBinsDiscretizer
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
from pyod.models.pca import PCA as PCA_od
from sklearn import cluster
from scipy import stats
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.ensemble import RandomForestRegressor as rfr
from lightgbm import LGBMClassifier as lgbmc
from lightgbm import LGBMRegressor as lgbmr
import sys
import gc
from sklearn.pipeline import Pipeline
from sklearn import metrics
from datetime import datetime
import calendar
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
from typing import Optional, Union
from pycaret.internal.logging import get_logger
from pycaret.internal.utils import infer_ml_usecase
from sklearn.utils.validation import check_is_fitted, check_X_y, check_random_state
from sklearn.utils.validation import _deprecate_positional_args
from sklearn.utils import _safe_indexing
from sklearn.exceptions import NotFittedError
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
SKLEARN_EMPTY_STEP = "passthrough"
# _____________________________________________________________________________________________________________________________
def str_if_not_null(x):
if pd.isnull(x) or (x is None) or pd.isna(x) or (x is not x):
return x
return str(x)
def find_id_columns(data, target, numerical_features):
# sometimes we have an id column in the data set; we will try to find it and drop it if found
len_samples = len(data)
id_columns = []
for i in data.select_dtypes(
include=["object", "int64", "float64", "float32"]
).columns:
col = data[i]
if i not in numerical_features and i != target:
if sum(col.isnull()) == 0:
try:
col = col.astype("int64")
except:
continue
if col.nunique() == len_samples:
# we extract column and sort it
features = col.sort_values()
# now we subtract the i-th value from the (i+1)-th (calculating increments)
increments = features.diff()[1:]
# if all increments are 1 (with float tolerance), then the column is ID column
if sum(np.abs(increments - 1) < 1e-7) == len_samples - 1:
id_columns.append(i)
return id_columns
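# Added illustration (not part of the original module; the frame and column names are
# assumptions): a unique integer column whose sorted values increase by exactly 1 is
# reported as an ID column, while a declared numerical feature and the target are skipped.
def _demo_find_id_columns():
    demo = pd.DataFrame(
        {
            "row_id": [3, 1, 4, 2],               # unique, unit increments once sorted
            "balance": [10.5, 99.0, 42.0, 7.3],   # declared numerical feature
            "churn": [0, 1, 0, 1],                # target
        }
    )
    # Expected result under the heuristic above: ["row_id"]
    return find_id_columns(demo, target="churn", numerical_features=["balance"])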
class DataTypes_Auto_infer(BaseEstimator, TransformerMixin):
"""
- This will try to infer data types automatically; an option to override the learned data types is also available.
- This also automatically deletes duplicate columns (same values or same column name), removes rows where the target variable is null and
removes columns and rows where all the records are null
"""
def __init__(
self,
target,
ml_usecase,
categorical_features=[],
numerical_features=[],
time_features=[],
features_todrop=[],
id_columns=[],
display_types=True,
float_dtype="float32",
): # nothing to define
"""
User to define the target (y) variable
args:
target: string, name of the target variable
ml_usecase: string, 'regression' or 'classification'. For now, only two-class classification is supported
- this is useful in case the target variable is an object / string; it will replace the strings with integers
categorical_features: list of categorical features, default None, when None best guess will be used to identify categorical features
numerical_features: list of numerical features, default None, when None best guess will be used to identify numerical features
time_features: list of date/time features, default None, when None best guess will be used to identify date/time features
"""
self.target = target
self.ml_usecase = ml_usecase
self.features_todrop = [str(x) for x in features_todrop]
self.categorical_features = [
x for x in categorical_features if x not in self.features_todrop
]
self.numerical_features = [
x for x in numerical_features if x not in self.features_todrop
]
self.time_features = [x for x in time_features if x not in self.features_todrop]
self.display_types = display_types
self.id_columns = id_columns
self.float_dtype = float_dtype
def fit(self, dataset, y=None): # learning data types of all the columns
"""
Args:
data: accepts a pandas data frame
Returns:
Panda Data Frame
"""
data = dataset.copy()
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop, errors="ignore", inplace=True)
# remove special characters from column names
# data.columns= data.columns.str.replace('[,]','')
# we will take float as numeric and object as categorical from the beginning
# for int64, we will check the proportion of unique counts to the total length of the data
# if the proportion is low, then it is probably categorical
# however, the proportion can be low / distorted due to a smaller denominator (total length / number of samples)
# so we will take the following chart
# 0-50 samples, threshold is 24%
# 50-100 samples, th is 12%
# 50-250 samples , th is 4.8%
# 250-500 samples, th is 2.4%
# 500 and above, 2% or below
# if there are inf or -inf then replace them with NaN
data.replace([np.inf, -np.inf], np.NaN, inplace=True)
# we can check if somehow everything is object; we can try converting those columns to int64
for i in data.select_dtypes(include=["object"]).columns:
try:
data[i] = data[i].astype("int64")
except:
None
for i in (
data.select_dtypes(include=["object"])
.drop(self.target, axis=1, errors="ignore")
.columns
):
try:
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="raise"
)
except:
continue
# if data type is bool or pandas Categorical, convert it to object
for i in data.select_dtypes(include=["bool", "category"]).columns:
data[i] = data[i].astype("object")
# with csv, if we have any null in a column that was int, pandas will read it as float.
# so first we need to convert any such floats that have NaN and fewer than 20 unique values
for i in data.select_dtypes(include=["float64"]).columns:
data[i] = data[i].astype(self.float_dtype)
# count how many Nas are there
na_count = sum(data[i].isnull())
# count how many values have decimals
count_float = np.nansum(
[False if r.is_integer() else True for r in data[i]]
)
# total decimal values
count_float = (
count_float - na_count
) # reducing it because we know NaN is counted as a float digit
# now if there isn't any decimal value, unique levels are 20 or fewer and there are NAs, then convert it to object
if (count_float == 0) & (data[i].nunique() <= 20) & (na_count > 0):
data[i] = data[i].astype("object")
# should really be an absolute number say 20
# length = len(data.iloc[:,0])
# if length in range(0,51):
# th=.25
# elif length in range(51,101):
# th=.12
# elif length in range(101,251):
# th=.048
# elif length in range(251,501):
# th=.024
# elif length > 500:
# th=.02
# if a column is int64 (excluding the target): treat it as categorical when unique counts are 20 or fewer, otherwise cast it to float
for i in data.select_dtypes(include=["int64"]).columns:
if i != self.target:
if data[i].nunique() <= 20: # hard coded
data[i] = data[i].apply(str_if_not_null)
else:
data[i] = data[i].astype(self.float_dtype)
# # if a column is of float type and only has two unique counts, it is probably one-hot encoded
# # make it object
for i in data.select_dtypes(include=[self.float_dtype]).columns:
if data[i].nunique() == 2:
data[i] = data[i].apply(str_if_not_null)
# for time & dates
# self.drop_time = [] # for now we are deleting time columns
# now in case we were given any specific column dtypes in advance, we will override those
for i in self.categorical_features:
try:
data[i] = data[i].apply(str_if_not_null)
except:
data[i] = dataset[i].apply(str_if_not_null)
for i in self.numerical_features:
try:
data[i] = data[i].astype(self.float_dtype)
except:
data[i] = dataset[i].astype(self.float_dtype)
for i in self.time_features:
try:
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="raise"
)
except:
data[i] = pd.to_datetime(
dataset[i], infer_datetime_format=True, utc=False, errors="raise"
)
for i in data.select_dtypes(
include=["datetime64", "datetime64[ns, UTC]"]
).columns:
data[i] = data[i].astype("datetime64[ns]")
# table of learned types
self.learned_dtypes = data.dtypes
# self.training_columns = data.drop(self.target,axis=1).columns
# if there are inf or -inf then replace them with NaN
data = data.replace([np.inf, -np.inf], np.NaN).astype(self.learned_dtypes)
# lets remove duplicates
# remove duplicate columns (columns with same values)
# (too expensive on bigger data sets)
# data_c = data.T.drop_duplicates()
# data = data_c.T
# remove columns with duplicate name
data = data.loc[:, ~data.columns.duplicated()]
# Remove NAs
data.dropna(axis=0, how="all", inplace=True)
data.dropna(axis=1, how="all", inplace=True)
# remove the row if target column has NA
try:
data.dropna(subset=[self.target], inplace=True)
except KeyError:
pass
# self.training_columns = data.drop(self.target,axis=1).columns
# since due to transpose , all data types have changed, lets change the dtypes to original---- not required any more since not transposing any more
# for i in data.columns: # we are taking all the columns in test , so we dot have to worry about droping target column
# data[i] = data[i].astype(self.learned_dtypes[self.learned_dtypes.index==i])
if self.display_types == True:
display(
wg.Text(
value="Following data types have been inferred automatically, if they are correct press enter to continue or type 'quit' otherwise.",
layout=Layout(width="100%"),
),
display_id="m1",
)
dt_print_out = pd.DataFrame(
self.learned_dtypes, columns=["Feature_Type"]
).drop("UNSUPERVISED_DUMMY_TARGET", errors="ignore")
dt_print_out["Data Type"] = ""
for i in dt_print_out.index:
if i != self.target:
if i in self.id_columns:
dt_print_out.loc[i, "Data Type"] = "ID Column"
elif dt_print_out.loc[i, "Feature_Type"] == "object":
dt_print_out.loc[i, "Data Type"] = "Categorical"
elif dt_print_out.loc[i, "Feature_Type"] == self.float_dtype:
dt_print_out.loc[i, "Data Type"] = "Numeric"
elif dt_print_out.loc[i, "Feature_Type"] == "datetime64[ns]":
dt_print_out.loc[i, "Data Type"] = "Date"
# elif dt_print_out.loc[i,'Feature_Type'] == 'int64':
# dt_print_out.loc[i,'Data Type'] = 'Categorical'
else:
dt_print_out.loc[i, "Data Type"] = "Label"
# if we added the dummy target column , then drop it
dt_print_out.drop(index="dummy_target", errors="ignore", inplace=True)
display(dt_print_out[["Data Type"]])
self.response = input()
if self.response in [
"quit",
"Quit",
"exit",
"EXIT",
"q",
"Q",
"e",
"E",
"QUIT",
"Exit",
]:
sys.exit(
"Read the documentation of setup to learn how to overwrite data types over the inferred types. setup function must run again before you continue modeling."
)
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
return data
def transform(self, dataset, y=None):
"""
Args:
data: accepts a pandas data frame
Returns:
Panda Data Frame
"""
data = dataset.copy()
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop, errors="ignore", inplace=True)
data = data[self.final_training_columns]
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# if there are inf or -inf then replace them with NaN
data.replace([np.inf, -np.inf], np.NaN, inplace=True)
try:
data.dropna(subset=[self.target], inplace=True)
except KeyError:
pass
# remove sepcial char from column names
# data.columns= data.columns.str.replace('[,]','')
# the very first thing we need to do is to check that the training and test data have the same columns
for i in self.final_training_columns:
if i not in data.columns:
raise TypeError(
f"test data does not have column {i} which was used for training."
)
# just keep picking the data and keep applying to the test data set (be mindful of target variable)
for (
i
) in (
data.columns
):  # we are taking all the columns in test, so we don't have to worry about dropping the target column
if i == self.target and (
(self.ml_usecase == "classification")
and (self.learned_dtypes[self.target] == "object")
):
data[i] = self.le.transform(data[i].apply(str).astype("object"))
data[i] = data[i].astype("int64")
else:
if self.learned_dtypes[i].name == "datetime64[ns]":
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="coerce"
)
data[i] = data[i].astype(self.learned_dtypes[i])
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
return data
# fit_transform
def fit_transform(self, dataset, y=None):
data = dataset
# since this is for training, we don't need any transformation since it has already been transformed in fit
data = self.fit(data)
# additionally we just need to treat the target variable
# for ml use case
if (self.ml_usecase == "classification") & (
data[self.target].dtype == "object"
):
self.le = LabelEncoder()
data[self.target] = self.le.fit_transform(
data[self.target].apply(str).astype("object")
)
self.replacement = _get_labelencoder_reverse_dict(self.le)
# self.u = list(pd.unique(data[self.target]))
# self.replacement = np.arange(0,len(self.u))
# data[self.target]= data[self.target].replace(self.u,self.replacement)
# data[self.target] = data[self.target].astype('int64')
# self.replacement = pd.DataFrame(dict(target_variable=self.u,replaced_with=self.replacement))
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
# finally save a list of columns that we would need from test data set
self.final_training_columns = data.columns.to_list()
self.final_training_columns.remove(self.target)
return data
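# Added usage sketch (not part of the original module; the data, column names and
# helper behaviour are assumptions): DataTypes_Auto_infer is normally fitted once on
# the raw training frame so that the learned dtypes can later be re-applied to unseen
# data through transform(). It relies on helpers defined elsewhere in this module
# (e.g. the label-encoder reverse-dict utility used for classification targets).
def _demo_datatypes_auto_infer():
    train = pd.DataFrame(
        {
            "age": [23, 31, 45, 52, 37],
            "city": ["NY", "SF", "NY", "LA", "SF"],
            "churn": ["yes", "no", "no", "yes", "no"],
        }
    )
    inferer = DataTypes_Auto_infer(
        target="churn", ml_usecase="classification", display_types=False
    )
    return inferer.fit_transform(train)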
# _______________________________________________________________________________________________________________________
# Imputation
class Simple_Imputer(_BaseImputer):
"""
Imputes all types of data (numerical, categorical & time).
Highly recommended to run Define_dataTypes class first
Numerical values can be imputed with mean or median or filled with zeros
categorical missing values will be replaced with the most frequent value or a constant (default "not_available")
Time values are imputed with the most frequent value
Ignores target (y) variable
Args:
Numeric_strategy: string , all possible values {'mean','median','zero'}
categorical_strategy: string , all possible values {'not_available','most frequent'}
target: string , name of the target variable
fill_value_numerical: number, value for filling missing values of numeric columns
fill_value_categorical: string, value for filling missing values of categorical columns
"""
_numeric_strategies = {
"mean": "mean",
"median": "median",
"most frequent": "most_frequent",
"zero": "constant",
}
_categorical_strategies = {
"most frequent": "most_frequent",
"not_available": "constant",
}
_time_strategies = {
"mean": "mean",
"median": "median",
"most frequent": "most_frequent",
}
def __init__(
self,
numeric_strategy,
categorical_strategy,
time_strategy,
target,
fill_value_numerical=0,
fill_value_categorical="not_available",
):
# Set the target variable, which we don't want to impute
self.target = target
if numeric_strategy not in self._numeric_strategies:
numeric_strategy = "zero"
self.numeric_strategy = numeric_strategy
if categorical_strategy not in self._categorical_strategies:
categorical_strategy = "most frequent"
self.categorical_strategy = categorical_strategy
if time_strategy not in self._time_strategies:
time_strategy = "most frequent"
self.time_strategy = time_strategy
self.fill_value_numerical = fill_value_numerical
self.fill_value_categorical = fill_value_categorical
# self.most_frequent_time = []
self.numeric_imputer = SimpleImputer(
strategy=self._numeric_strategies[self.numeric_strategy],
fill_value=fill_value_numerical,
)
self.categorical_imputer = SimpleImputer(
strategy=self._categorical_strategies[self.categorical_strategy],
fill_value=fill_value_categorical,
)
self.time_imputer = SimpleImputer(
strategy=self._time_strategies[self.time_strategy],
)
def fit(self, X, y=None):
"""
Fit the imputer on dataset.
Args:
X : pd.DataFrame, the dataset to be imputed
Returns:
self : Simple_Imputer
"""
try:
data = X.drop(self.target, axis=1)
except:
data = X
self.numeric_columns = data.select_dtypes(
include=["float32", "float64", "int32", "int64"]
).columns
self.categorical_columns = data.select_dtypes(
include=["object", "bool", "string", "category"]
).columns
self.time_columns = data.select_dtypes(
include=["datetime64[ns]", "timedelta64[ns]"]
).columns
statistics = []
if not self.numeric_columns.empty:
self.numeric_imputer.fit(data[self.numeric_columns])
statistics.append((self.numeric_imputer.statistics_, self.numeric_columns))
if not self.categorical_columns.empty:
self.categorical_imputer.fit(data[self.categorical_columns])
statistics.append(
(self.categorical_imputer.statistics_, self.categorical_columns)
)
if not self.time_columns.empty:
for col in self.time_columns:
data[col] = data[col][data[col].notnull()].astype(np.int64)
self.time_imputer.fit(data[self.time_columns])
statistics.append((self.time_imputer.statistics_, self.time_columns))
self.statistics_ = np.zeros(shape=len(data.columns), dtype=object)
columns = list(data.columns)
for s, index in statistics:
for i, j in enumerate(index):
self.statistics_[columns.index(j)] = s[i]
return self
def transform(self, X, y=None):
"""
Impute all missing values in dataset.
Args:
X: pd.DataFrame, the dataset to be imputed
Returns:
data: pd.DataFrame, the imputed dataset
"""
data = X
imputed_data = []
if not self.numeric_columns.empty:
numeric_data = pd.DataFrame(
self.numeric_imputer.transform(data[self.numeric_columns]),
columns=self.numeric_columns,
index=data.index,
)
imputed_data.append(numeric_data)
if not self.categorical_columns.empty:
categorical_data = pd.DataFrame(
self.categorical_imputer.transform(data[self.categorical_columns]),
columns=self.categorical_columns,
index=data.index,
)
for col in categorical_data.columns:
categorical_data[col] = categorical_data[col].apply(str)
imputed_data.append(categorical_data)
if not self.time_columns.empty:
datetime_columns = data.select_dtypes(include=["datetime"]).columns
timedelta_columns = data.select_dtypes(include=["timedelta"]).columns
timedata_copy = data[self.time_columns].copy()
for col in self.time_columns:
timedata_copy[col] = timedata_copy[col][
timedata_copy[col].notnull()
].astype(np.int64)
time_data = pd.DataFrame(
self.time_imputer.transform(timedata_copy),
columns=self.time_columns,
index=data.index,
)
for col in datetime_columns:
time_data[col][data[col].notnull()] = data[col][data[col].notnull()]
time_data[col] = time_data[col].apply(pd.Timestamp)
for col in timedelta_columns:
time_data[col][data[col].notnull()] = data[col][data[col].notnull()]
time_data[col] = time_data[col].apply(pd.Timedelta)
imputed_data.append(time_data)
if imputed_data:
data.update(pd.concat(imputed_data, axis=1))
data.astype(X.dtypes)
return data
def fit_transform(self, X, y=None):
"""
Fit and impute on dataset.
Args:
X: pd.DataFrame, the dataset to be fitted and imputed
Returns:
pd.DataFrame, the imputed dataset
"""
data = X
self.fit(data)
return self.transform(data)
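# Added usage sketch (not part of the original module; the tiny frame below is an
# assumption): impute one numeric gap with the column mean and one categorical gap
# with the most frequent level, leaving the target column untouched.
def _demo_simple_imputer():
    df = pd.DataFrame(
        {
            "amount": [10.0, np.nan, 30.0],
            "colour": ["red", None, "red"],
            "y": [0, 1, 0],
        }
    )
    imputer = Simple_Imputer(
        numeric_strategy="mean",
        categorical_strategy="most frequent",
        time_strategy="most frequent",
        target="y",
    )
    return imputer.fit_transform(df)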
# _______________________________________________________________________________________________________________________
# Imputation with surrogate columns
class Surrogate_Imputer(_BaseImputer):
"""
Imputes feature with surrogate column (numerical,categorical & Time).
- Highly recommended to run Define_dataTypes class first
- it is also recommended to only apply this to features where it makes business sense to create a surrogate column
- feature name has to be provided
- only able to handle one feature at a time
- Numerical values can be imputed with mean or median or filled with zeros
- categorical missing values will be replaced with "Other"
- Time values are imputed with the most frequent value
- Ignores target (y) variable
Args:
feature_name: string, provide features name
feature_type: string , all possible values {'numeric','categorical','date'}
strategy: string ,all possible values {'mean','median','zero','not_available','most frequent'}
target: string , name of the target variable
"""
def __init__(self, numeric_strategy, categorical_strategy, target):
self.numeric_strategy = numeric_strategy
self.target = target
self.categorical_strategy = categorical_strategy
def fit(self, dataset, y=None): #
def zeros(x):
return 0
data = dataset
# make a table for numerical variable with strategy stats
if self.numeric_strategy == "mean":
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(np.nanmean)
)
elif self.numeric_strategy == "median":
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(np.nanmedian)
)
else:
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(zeros)
)
self.numeric_columns = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.columns
)
# also need to learn if any columns had NA in training
self.numeric_na = pd.DataFrame(columns=self.numeric_columns)
for i in self.numeric_columns:
if data[i].isnull().any() == True:
self.numeric_na.loc[0, i] = True
else:
self.numeric_na.loc[0, i] = False
# for Catgorical ,
if self.categorical_strategy == "most frequent":
self.categorical_columns = (
data.drop(self.target, axis=1).select_dtypes(include=["object"]).columns
)
self.categorical_stats = pd.DataFrame(
columns=self.categorical_columns
) # place holder
for i in self.categorical_stats.columns:
self.categorical_stats.loc[0, i] = data[i].value_counts().index[0]
# also need to learn if any columns had NA in training, but this is only valid if strategy is "most frequent"
self.categorical_na = pd.DataFrame(columns=self.categorical_columns)
for i in self.categorical_columns:
if sum(data[i].isnull()) > 0:
self.categorical_na.loc[0, i] = True
else:
self.categorical_na.loc[0, i] = False
else:
self.categorical_columns = (
data.drop(self.target, axis=1).select_dtypes(include=["object"]).columns
)
self.categorical_na = pd.DataFrame(columns=self.categorical_columns)
self.categorical_na.loc[
0, :
] = False # (in this situation we are not making any surrogate column)
# for time, there is only one way, pick up the most frequent one
self.time_columns = (
data.drop(self.target, axis=1)
.select_dtypes(include=["datetime64[ns]"])
.columns
)
self.time_stats = pd.DataFrame(columns=self.time_columns) # place holder
self.time_na = pd.DataFrame(columns=self.time_columns)
for i in self.time_columns:
self.time_stats.loc[0, i] = data[i].value_counts().index[0]
# learn if time columns were NA
for i in self.time_columns:
if data[i].isnull().any() == True:
self.time_na.loc[0, i] = True
else:
self.time_na.loc[0, i] = False
return data # nothing to return
def transform(self, dataset, y=None):
data = dataset
# for numeric columns
for i, s in zip(data[self.numeric_columns].columns, self.numeric_stats):
array = data[i].isnull()
data[i].fillna(s, inplace=True)
# make a surrogate column if there was any
if self.numeric_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
# for categorical columns
if self.categorical_strategy == "most frequent":
for i in self.categorical_stats.columns:
# data[i].fillna(self.categorical_stats.loc[0,i],inplace=True)
array = data[i].isnull()
data[i] = data[i].fillna(self.categorical_stats.loc[0, i])
data[i] = data[i].apply(str)
# make surrogate column
if self.categorical_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
else: # this means replace na with "not_available"
for i in self.categorical_columns:
data[i].fillna("not_available", inplace=True)
data[i] = data[i].apply(str)
# no need to make surrogate since not_available is itself a new colum
# for time
for i in self.time_stats.columns:
array = data[i].isnull()
data[i].fillna(self.time_stats.loc[0, i], inplace=True)
# make surrogate column
if self.time_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
return data
def fit_transform(self, dataset, y=None):
data = dataset
data = self.fit(data)
return self.transform(data)
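# Added usage sketch (not part of the original module; data and expectations are
# assumptions and depend on the pandas version this module targets): besides filling
# the gaps, columns that had missing values in training gain a "<name>_surrogate"
# indicator column.
def _demo_surrogate_imputer():
    df = pd.DataFrame(
        {
            "amount": [1.0, np.nan, 3.0],
            "colour": ["red", None, "red"],
            "y": [0, 1, 0],
        }
    )
    imp = Surrogate_Imputer(
        numeric_strategy="mean", categorical_strategy="most frequent", target="y"
    )
    return imp.fit_transform(df)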
class Iterative_Imputer(_BaseImputer):
def __init__(
self,
regressor: BaseEstimator,
classifier: BaseEstimator,
*,
target=None,
missing_values=np.nan,
initial_strategy_numeric: str = "mean",
initial_strategy_categorical: str = "most frequent",
initial_strategy_time: str = "most frequent",
ordinal_columns: Optional[list] = None,
max_iter: int = 10,
warm_start: bool = False,
imputation_order: str = "ascending",
verbose: int = 0,
random_state: int = None,
add_indicator: bool = False,
):
super().__init__(missing_values=missing_values, add_indicator=add_indicator)
self.regressor = regressor
self.classifier = classifier
self.initial_strategy_numeric = initial_strategy_numeric
self.initial_strategy_categorical = initial_strategy_categorical
self.initial_strategy_time = initial_strategy_time
self.max_iter = max_iter
self.warm_start = warm_start
self.imputation_order = imputation_order
self.verbose = verbose
self.random_state = random_state
self.target = target
if ordinal_columns is None:
ordinal_columns = []
self.ordinal_columns = list(ordinal_columns)
self._column_cleaner = Clean_Colum_Names()
def _initial_imputation(self, X):
if self.initial_imputer_ is None:
self.initial_imputer_ = Simple_Imputer(
target="__TARGET__", # dummy value, we don't actually want to drop anything
numeric_strategy=self.initial_strategy_numeric,
categorical_strategy=self.initial_strategy_categorical,
time_strategy=self.initial_strategy_time,
)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
return X_filled
def _impute_one_feature(self, X, column, X_na_mask, fit):
if not fit:
check_is_fitted(self)
is_classification = (
X[column].dtype.name == "object" or column in self.ordinal_columns
)
if is_classification:
if column in self.classifiers_:
time, dummy, le, estimator = self.classifiers_[column]
elif not fit:
return X
else:
estimator = clone(self._classifier)
time = Make_Time_Features()
dummy = Dummify(column)
le = LabelEncoder()
else:
if column in self.regressors_:
time, dummy, le, estimator = self.regressors_[column]
elif not fit:
return X
else:
estimator = clone(self._regressor)
time = Make_Time_Features()
dummy = Dummify(column)
le = None
if fit:
fit_kwargs = {}
X_train = X[~X_na_mask[column]]
y_train = X_train[column]
# catboost handles categoricals itself
if "catboost" not in str(type(estimator)).lower():
X_train = time.fit_transform(X_train)
X_train = dummy.fit_transform(X_train)
X_train.drop(column, axis=1, inplace=True)
else:
X_train.drop(column, axis=1, inplace=True)
fit_kwargs["cat_features"] = []
for i, col in enumerate(X_train.columns):
if X_train[col].dtype.name == "object":
X_train[col] = pd.Categorical(
X_train[col], ordered=column in self.ordinal_columns
)
fit_kwargs["cat_features"].append(i)
fit_kwargs["cat_features"] = np.array(
fit_kwargs["cat_features"], dtype=int
)
X_train = self._column_cleaner.fit_transform(X_train)
if le:
y_train = le.fit_transform(y_train)
try:
assert self.warm_start
estimator.partial_fit(X_train, y_train)
except:
estimator.fit(X_train, y_train, **fit_kwargs)
X_test = X.drop(column, axis=1)[X_na_mask[column]]
X_test = time.transform(X_test)
# catboost handles categoricals itself
if "catboost" not in str(type(estimator)).lower():
X_test = dummy.transform(X_test)
else:
for col in X_test.select_dtypes("object").columns:
X_test[col] = pd.Categorical(
X_test[col], ordered=column in self.ordinal_columns
)
result = estimator.predict(X_test)
if le:
result = le.inverse_transform(result)
if fit:
if is_classification:
self.classifiers_[column] = (time, dummy, le, estimator)
else:
self.regressors_[column] = (time, dummy, le, estimator)
if result.dtype.name == "float64":
result = result.astype("float32")
X_test[column] = result
X.update(X_test[column])
gc.collect()
return X
def _impute(self, X, fit: bool):
if self.target in X.columns:
target_column = X[self.target]
X = X.drop(self.target, axis=1)
else:
target_column = None
original_columns = X.columns
original_index = X.index
X = X.reset_index(drop=True)
X = self._column_cleaner.fit_transform(X)
self.imputation_sequence_ = (
X.isnull().sum().sort_values(ascending=self.imputation_order == "ascending")
)
self.imputation_sequence_ = [
col
for col in self.imputation_sequence_[self.imputation_sequence_ > 0].index
if X[col].dtype.name != "datetime64[ns]"
]
X_na_mask = X.isnull()
X_imputed = self._initial_imputation(X.copy())
for i in range(self.max_iter if fit else 1):
for feature in self.imputation_sequence_:
get_logger().info(f"Iterative Imputation: {i+1} cycle | {feature}")
X_imputed = self._impute_one_feature(X_imputed, feature, X_na_mask, fit)
X_imputed.columns = original_columns
X_imputed.index = original_index
if target_column is not None:
X_imputed[self.target] = target_column
return X_imputed
def transform(self, X, y=None, **fit_params):
return self._impute(X, fit=False)
def fit_transform(self, X, y=None, **fit_params):
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.regressor is None:
raise ValueError("No regressor provided")
else:
self._regressor = clone(self.regressor)
try:
self._regressor.set_params(random_state=self.random_state_)
except:
pass
if self.classifier is None:
raise ValueError("No classifier provided")
else:
self._classifier = clone(self.classifier)
try:
self._classifier.set_params(random_state=self.random_state_)
except:
pass
self.classifiers_ = {}
self.regressors_ = {}
self.initial_imputer_ = None
return self._impute(X, fit=True)
def fit(self, X, y=None, **fit_params):
self.fit_transform(X, y=y, **fit_params)
return self
# _______________________________________________________________________________________________________________________
# Zero and Near Zero Variance
class Zroe_NearZero_Variance(BaseEstimator, TransformerMixin):
"""
- it eliminates the features having zero variance
- it eliminates the features having near zero variance
- Near zero variance is determined by
-1) Count of unique points divided by the total length of the feature has to be lower than a pre-specified threshold
-2) Most common point (count) divided by the second most common point (count) in the feature is greater than a pre-specified threshold
Once both conditions are met, the feature is dropped
-Ignores target variable
Args:
threshold_1: float (between 0.0 to 1.0) , default is .10
threshold_2: int (between 1 to 100), default is 20
target variable: string, name of the target variable
"""
def __init__(self, target, threshold_1=0.1, threshold_2=20):
self.threshold_1 = threshold_1
self.threshold_2 = threshold_2
self.target = target
def fit(
self, dataset, y=None
): # from training data set we are going to learn what columns to drop
data = dataset
self.to_drop = []
sampl_len = len(data[self.target])
for i in data.drop(self.target, axis=1).columns:
# get the number of unique counts
u = pd.DataFrame(data[i].value_counts()).sort_values(
by=i, ascending=False, inplace=False
)
# take the length of u and divide it by the total number of samples; this checks the 1st rule, which has to be low, say 10%
# import pdb; pdb.set_trace()
first = len(u) / sampl_len
# then check if most common divided by 2nd most common ratio is 20 or more
if (
len(u[i]) == 1
):  # this means that if the column has no variance, automatically make the number big to drop it
second = 100
else:
second = u.iloc[0, 0] / u.iloc[1, 0]
# if both conditions are true then drop the column; however, we don't want to alter columns that indicate NAs
if (first <= 0.10) and (second >= 20) and (i[-10:] != "_surrogate"):
self.to_drop.append(i)
# now drop if the column has zero variance
if (second == 100) and (i[-10:] != "_surrogate"):
self.to_drop.append(i)
def transform(
self, dataset, y=None
): # since it is only for training data set , nothing here
data = dataset.drop(self.to_drop, axis=1)
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
return self.transform(data)
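# Added usage sketch (not part of the original module; the frame is an assumption):
# a constant column is dropped by the zero/near-zero variance rules above, while an
# informative column and the target are kept.
def _demo_near_zero_variance():
    df = pd.DataFrame(
        {
            "constant": [1] * 100,
            "informative": np.arange(100),
            "y": np.random.randint(0, 2, 100),
        }
    )
    nzv = Zroe_NearZero_Variance(target="y")
    return nzv.fit_transform(df)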
# ____________________________________________________________________________________________________________________________
# rare catagorical variables
class Catagorical_variables_With_Rare_levels(BaseEstimator, TransformerMixin):
"""
-Merges levels in categorical features with a more frequent level if they appear less than a threshold count
e.g. Col=[a,a,a,a,b,b,c,c]
if threshold is set to 2, then c will be merged with b because both are below the threshold
There have to be at least two levels below the threshold for this to work
the process will keep going until all the levels have at least 2 (threshold) counts
-Only handles categorical features
-It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first
-Ignores target variable
Args:
threshold: float, default 0.05, quantile of level counts used as the merge cut-off
target: string, name of the target variable
new_level_name: string, name given to the new level generated, default 'others_infrequent'
"""
def __init__(self, target, new_level_name="others_infrequent", threshold=0.05):
self.threshold = threshold
self.target = target
self.new_level_name = new_level_name
def fit(
self, dataset, y=None
):  # we will learn, for each column, which levels to merge as "others"
# every level of the categorical feature has to be above the threshold; if not, they will be clubbed together as "others"
# in order to apply, there should be at least two levels below the threshold!
# create a place holder
data = dataset
self.ph = pd.DataFrame(
columns=data.drop(self.target, axis=1)
.select_dtypes(include="object")
.columns
)
# ph.columns = df.columns# catagorical only
for i in data[self.ph.columns].columns:
# determine the infrequent count
v_c = data[i].value_counts()
count_th = round(v_c.quantile(self.threshold))
a = np.sum(
pd.DataFrame(data[i].value_counts().sort_values())[i] <= count_th
)
if a >= 2:  # there have to be at least two rare levels
count = pd.DataFrame(data[i].value_counts().sort_values())
count.columns = ["fre"]
count = count[count["fre"] <= count_th]
to_club = list(count.index)
self.ph.loc[0, i] = to_club
else:
self.ph.loc[0, i] = []
# # also need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others
# self.ph_level = pd.DataFrame(columns=data.drop(self.target,axis=1).select_dtypes(include="object").columns)
# for i in self.ph_level.columns:
# self.ph_level.loc[0,i] = list(data[i].value_counts().sort_values().index)
def transform(self, dataset, y=None): #
# transform
data = dataset
for i in data[self.ph.columns].columns:
t_replace = self.ph.loc[0, i]
data[i].replace(
to_replace=t_replace, value=self.new_level_name, inplace=True
)
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
return self.transform(data)
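# Added usage sketch (not part of the original module; the data and threshold are
# assumptions and behaviour depends on the pandas version this module targets): levels
# of "colour" that fall below the learned count threshold are merged into the
# "others_infrequent" level.
def _demo_rare_levels():
    df = pd.DataFrame(
        {
            "colour": ["red"] * 50 + ["blue"] * 45 + ["green", "green", "pink", "cyan", "mauve"],
            "y": np.random.rand(100),
        }
    )
    rare = Catagorical_variables_With_Rare_levels(target="y", threshold=0.3)
    return rare.fit_transform(df)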
# _______________________________________________________________________________________________________________________
# new catagorical level in test
class New_Catagorical_Levels_in_TestData(BaseEstimator, TransformerMixin):
"""
-This handles the case where a new level appears in a categorical feature of the test dataset (i.e. a level on which the model was not trained previously)
-It simply replaces the new level in test data set with the most frequent or least frequent level in the same feature in the training data set
-It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first
-Ignores target variable
Args:
target: string , name of the target variable
replacement_strategy:string , 'raise exception', 'least frequent' or 'most frequent' (default 'most frequent' )
"""
def __init__(self, target, replacement_strategy="most frequent"):
self.target = target
self.replacement_strategy = replacement_strategy
def fit(self, data, y=None):
# need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others
self.ph_train_level = pd.DataFrame(
columns=data.drop(self.target, axis=1)
.select_dtypes(include="object")
.columns
)
for i in self.ph_train_level.columns:
if self.replacement_strategy == "least frequent":
self.ph_train_level.loc[0, i] = list(
data[i].value_counts().sort_values().index
)
else:
self.ph_train_level.loc[0, i] = list(data[i].value_counts().index)
def transform(self, data, y=None): #
# transform
# we need to learn the same for test data , and then we will compare to check what levels are new in there
self.ph_test_level = pd.DataFrame(
columns=data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include="object")
.columns
)
for i in self.ph_test_level.columns:
self.ph_test_level.loc[0, i] = list(
data[i].value_counts().sort_values().index
)
# now we have levels for both test and train; we will start comparing and replacing levels in the test set (only if the test set has new levels)
for i in self.ph_test_level.columns:
new = list(
(set(self.ph_test_level.loc[0, i]) - set(self.ph_train_level.loc[0, i]))
)
# now if there is a difference , only then replace it
if len(new) > 0:
if self.replacement_strategy == "raise exception":
raise ValueError(
f"Column '{i}' contains levels '{new}' which were not present in train data."
)
data[i].replace(new, self.ph_train_level.loc[0, i][0], inplace=True)
return data
def fit_transform(
self, data, y=None
): # There is no transformation happening in training data set, its all about test
self.fit(data)
return data
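# Added usage sketch (not part of the original module; the frames are assumptions and
# behaviour depends on the pandas version this module targets): a level seen only in
# the test data ("LA") is mapped to a level learned from training, per the
# replacement strategy above.
def _demo_new_levels_in_test():
    train = pd.DataFrame({"city": ["NY", "NY", "SF"], "y": [1, 0, 1]})
    test = pd.DataFrame({"city": ["NY", "LA"], "y": [0, 1]})
    handler = New_Catagorical_Levels_in_TestData(target="y")
    handler.fit_transform(train)
    return handler.transform(test)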
# _______________________________________________________________________________________________________________________
# Group akin features
class Group_Similar_Features(BaseEstimator, TransformerMixin):
"""
- Given a list of features , it creates aggregate features
- features created are Min, Max, Mean, Median, Mode & Std
- Only works on numerical features
Args:
list_of_similar_features: list of lists of strings, e.g. [['col1','col2'],['col3','col4']]
group_name: list, group name/names to be added as prefix to aggregate features, e.g. ['group1','group2']
"""
def __init__(self, group_name=[], list_of_grouped_features=[[]]):
self.list_of_similar_features = list_of_grouped_features
self.group_name = group_name
# if list of list not given
try:
np.array(self.list_of_similar_features).shape[0]
except:
raise TypeError(
    "Group_Similar_Features: list_of_grouped_features is not provided as a list of lists"
)
def fit(self, data, y=None):
# nothing to learn
return self
def transform(self, dataset, y=None):
data = dataset
# only process if a list of grouped features was actually provided
if len(self.list_of_similar_features) > 0:
for f, g in zip(self.list_of_similar_features, self.group_name):
data[g + "_Min"] = data[f].apply(np.min, 1)
data[g + "_Max"] = data[f].apply(np.max, 1)
data[g + "_Mean"] = data[f].apply(np.mean, 1)
data[g + "_Median"] = data[f].apply(np.median, 1)
data[g + "_Mode"] = stats.mode(data[f], 1)[0]
data[g + "_Std"] = data[f].apply(np.std, 1)
return data
else:
return data
def fit_transform(self, data, y=None):
return self.transform(data)
# ____________________________________________________________________________________________________________________________________________________________________
# Binning for Continious
class Binning(BaseEstimator, TransformerMixin):
"""
- Converts numerical variables to categorical variables through binning
- The number of bins is automatically determined through Sturges' method
- Once discretized, the original feature will be dropped
Args:
features_to_discretize: list of feature names to be binned
"""
def __init__(self, features_to_discretize):
self.features_to_discretize = features_to_discretize
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
# only do if features are provided
if len(self.features_to_discretize) > 0:
data_t = self.disc.transform(
np.array(data[self.features_to_discretize]).reshape(
-1, self.len_columns
)
)
# make pandas data frame
data_t = pd.DataFrame(
data_t, columns=self.features_to_discretize, index=data.index
)
# all these columns are catagorical
data_t = data_t.astype(str)
# drop original columns
data.drop(self.features_to_discretize, axis=1, inplace=True)
# add newly created columns
data = pd.concat((data, data_t), axis=1)
return data
def fit_transform(self, dataset, y=None):
data = dataset
# only do if features are given
if len(self.features_to_discretize) > 0:
# place holder for the number of bins for each feature
self.binns = []
for i in self.features_to_discretize:
# get number of bins
hist, _ = np.histogram(data[i], bins="sturges")
self.binns.append(len(hist))
# how many columns to deal with
self.len_columns = len(self.features_to_discretize)
# now do fit transform
self.disc = KBinsDiscretizer(
n_bins=self.binns, encode="ordinal", strategy="kmeans"
)
data_t = self.disc.fit_transform(
np.array(data[self.features_to_discretize]).reshape(
-1, self.len_columns
)
)
# make pandas data frame
data_t = pd.DataFrame(
data_t, columns=self.features_to_discretize, index=data.index
)
# all these columns are catagorical
data_t = data_t.astype(str)
# drop original columns
data.drop(self.features_to_discretize, axis=1, inplace=True)
# add newly created columns
data = pd.concat((data, data_t), axis=1)
return data
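# Added usage sketch (not part of the original module; the synthetic column is an
# assumption): a skewed numeric feature is discretized into Sturges-determined,
# k-means-placed ordinal bins and returned as a string-typed column.
def _demo_binning():
    df = pd.DataFrame({"income": np.random.lognormal(3.0, 0.5, 200)})
    binner = Binning(features_to_discretize=["income"])
    return binner.fit_transform(df)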
# ______________________________________________________________________________________________________________________
# Scaling & Power Transform
class Scaling_and_Power_transformation(BaseEstimator, TransformerMixin):
"""
-Given a data set, applies Min Max, Standard Scaler or Power Transformation (yeo-johnson)
-it is recommended to run Define_dataTypes first
- ignores target variable
Args:
target: string , name of the target variable
function_to_apply: string, default 'zscore' (standard scaler); all others {'minmax','yj','quantile','robust','maxabs'} (min max, yeo-johnson & quantile power transformation, robust and MaxAbs scaler)
"""
def __init__(self, target, function_to_apply="zscore", random_state_quantile=42):
self.target = target
self.function_to_apply = function_to_apply
self.random_state_quantile = random_state_quantile
# self.transform_target = transform_target
# self.ml_usecase = ml_usecase
def fit(self, dataset, y=None):
data = dataset
# we only want to apply if there are numeric columns
self.numeric_features = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=["float32", "float64", "int64"])
.columns
)
if len(self.numeric_features) > 0:
if self.function_to_apply == "zscore":
self.scale_and_power = StandardScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "minmax":
self.scale_and_power = MinMaxScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "yj":
self.scale_and_power = PowerTransformer(
method="yeo-johnson", standardize=True
)
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "quantile":
self.scale_and_power = QuantileTransformer(
random_state=self.random_state_quantile,
output_distribution="normal",
)
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "robust":
self.scale_and_power = RobustScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "maxabs":
self.scale_and_power = MaxAbsScaler()
self.scale_and_power.fit(data[self.numeric_features])
return self
def transform(self, dataset, y=None):
data = dataset
if len(self.numeric_features) > 0:
self.data_t = pd.DataFrame(
self.scale_and_power.transform(data[self.numeric_features])
)
# we need to set the same index as original data
self.data_t.index = data.index
self.data_t.columns = self.numeric_features
for i in self.numeric_features:
data[i] = self.data_t[i]
return data
else:
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
# convert target if appropriate
# default behavious is quantile transformer
# if ((self.ml_usecase == 'regression') and (self.transform_target == True)):
# self.scale_and_power_target = QuantileTransformer(random_state=self.random_state_quantile,output_distribution='normal')
# data[self.target]=self.scale_and_power_target.fit_transform(np.array(data[self.target]).reshape(-1,1))
return self.transform(data)
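# Added usage sketch (not part of the original module; the frame is an assumption):
# only the numeric, non-target column is rescaled; here 'minmax' maps it onto [0, 1].
def _demo_scaling():
    df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "y": [0, 1, 0, 1]})
    scaler = Scaling_and_Power_transformation(target="y", function_to_apply="minmax")
    return scaler.fit_transform(df)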
# ______________________________________________________________________________________________________________________
# Scaling & Power Transform
class Target_Transformation(BaseEstimator, TransformerMixin):
"""
- Applies Power Transformation (yeo-johnson , Box-Cox) to target variable (Applicable to Regression only)
- 'bc' for Box-Cox & 'yj' for yeo-johnson, default is Box-Cox
- if the target contains negative / zero values, yeo-johnson is automatically selected
"""
def __init__(self, target, function_to_apply="bc"):
self.target = target
if function_to_apply == "bc":
function_to_apply = "box-cox"
else:
function_to_apply = "yeo-johnson"
self.function_to_apply = function_to_apply
def inverse_transform(self, dataset, y=None):
data = self.p_transform_target.inverse_transform(
np.array(dataset).reshape(-1, 1)
)
return data
def fit(self, dataset, y=None):
self.fit_transform(dataset, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
if self.target in dataset.columns:
# apply transformation
data[self.target] = self.p_transform_target.transform(
np.array(data[self.target]).reshape(-1, 1)
)
return data
def fit_transform(self, dataset, y=None):
data = dataset
# if target has zero or negative values use yj instead
if any(data[self.target] <= 0):
self.function_to_apply = "yeo-johnson"
# apply transformation
self.p_transform_target = PowerTransformer(method=self.function_to_apply)
data[self.target] = self.p_transform_target.fit_transform(
np.array(data[self.target]).reshape(-1, 1)
)
return data
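# Added usage sketch (not part of the original module; the values are assumptions):
# a strictly positive regression target is Box-Cox transformed in place; a target with
# zeros or negatives would silently fall back to yeo-johnson, as coded above.
def _demo_target_transformation():
    df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "price": [10.0, 40.0, 90.0, 160.0]})
    tt = Target_Transformation(target="price", function_to_apply="bc")
    return tt.fit_transform(df)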
# __________________________________________________________________________________________________________________________
# Time feature extractor
class Make_Time_Features(BaseEstimator, TransformerMixin):
"""
- Given a time feature, it extracts additional features
- Only accepts / works where the feature / data type is datetime64[ns]
- Full list of features is:
['month','weekday','is_month_end','is_month_start','hour']
- All extracted features are defined as string / object
- It is recommended to run Define_dataTypes first
Args:
time_feature: list of feature names with dtype datetime64[ns]; default None. If None, datetime64[ns] columns are picked up automatically
list_of_features: list of required features, default ['month','weekday','is_month_end','is_month_start','hour']
"""
def __init__(
self,
time_feature=None,
list_of_features=["month", "weekday", "is_month_end", "is_month_start", "hour"],
):
self.time_feature = time_feature
self.list_of_features = set(list_of_features)
def fit(self, data, y=None):
if self.time_feature is None:
self.time_feature = data.select_dtypes(include=["datetime64[ns]"]).columns
self.has_hour_ = set()
for i in self.time_feature:
if "hour" in self.list_of_features:
if any(x.hour for x in data[i]):
self.has_hour_.add(i)
return self
def transform(self, dataset, y=None):
data = dataset.copy()
# run fit transform first
def get_time_features(r):
features = []
if "month" in self.list_of_features:
features.append(("_month", str(r.month)))
if "weekday" in self.list_of_features:
features.append(("_weekday", str(r.weekday())))
if "is_month_end" in self.list_of_features:
features.append(
(
"_is_month_end",
"1"
if calendar.monthrange(r.year, r.month)[1] == r.day
else "0",
)
)
if "is_month_start" in self.list_of_features:
features.append(("_is_month_start", "1" if r.day == 1 else "0"))
return tuple(features)
# start making features for every column in the time list
for i in self.time_feature:
list_of_features = [get_time_features(r) for r in data[i]]
fd = defaultdict(list)
for x in list_of_features:
for k, v in x:
fd[k].append(v)
for k, v in fd.items():
data[i + k] = v
# make hour column if chosen
if "hour" in self.list_of_features and i in self.has_hour_:
h = [r.hour for r in data[i]]
data[f"{i}_hour"] = h
data[f"{i}_hour"] = data[f"{i}_hour"].apply(str)
# we don't need the time columns any more
data.drop(self.time_feature, axis=1, inplace=True)
return data
def fit_transform(self, dataset, y=None):
# if no column names are given, pick datetime columns
self.fit(dataset, y=y)
return self.transform(dataset, y=y)
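# Illustrative usage sketch (column name "booking_date" is hypothetical):
#   >>> mtf = Make_Time_Features(time_feature=["booking_date"],
#   ...                          list_of_features=["month", "weekday", "hour"])
#   >>> train_t = mtf.fit_transform(train)   # adds booking_date_month, booking_date_weekday, ...
#   >>> test_t = mtf.transform(test)         # the original datetime column is dropped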
# ____________________________________________________________________________________________________________________________________________________________________
# Ordinal transformer
class Ordinal(BaseEstimator, TransformerMixin):
"""
- Converts categorical features into ordinal values
- Takes a dataframe and information about column names and ordered categories as a dict
- Returns a float pandas DataFrame
"""
def __init__(self, info_as_dict):
self.info_as_dict = info_as_dict
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
new_data_test = pd.DataFrame(
self.enc.transform(data[self.info_as_dict.keys()]),
columns=self.info_as_dict.keys(),
index=data.index,
)
for i in self.info_as_dict.keys():
data[i] = new_data_test[i]
return data
def fit_transform(self, dataset, y=None):
data = dataset
# create categories from the given keys in the data set
cat_list = []
for i in self.info_as_dict.values():
i = [np.array(i)]
cat_list = cat_list + i
# now do fit transform
self.enc = OrdinalEncoder(categories=cat_list)
new_data_train = pd.DataFrame(
self.enc.fit_transform(data.loc[:, self.info_as_dict.keys()]),
columns=self.info_as_dict,
index=data.index,
)
# new_data = pd.DataFrame(self.enc.fit_transform(data.loc[:,self.info_as_dict.keys()]))
for i in self.info_as_dict.keys():
data[i] = new_data_train[i]
return data
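# Illustrative usage sketch (feature name and category order are hypothetical):
#   >>> ord_enc = Ordinal(info_as_dict={"size": ["small", "medium", "large"]})
#   >>> train_t = ord_enc.fit_transform(train)   # "size" becomes 0.0 / 1.0 / 2.0
#   >>> test_t = ord_enc.transform(test)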
# _______________________________________________________________________________________________________________________
# make dummy variables
class Dummify(BaseEstimator, TransformerMixin):
"""
- Creates one-hot encoded (dummy) variables for categorical features
- It is HIGHLY recommended to run the Select_Data_Type class first
- Ignores the target variable
Args:
target: string, name of the target variable
"""
def __init__(self, target):
self.target = target
# create the OneHotEncoder object
self.ohe = OneHotEncoder(handle_unknown="ignore", dtype=np.float32)
def fit(self, X, y=None):
data = X
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
# we need to learn the column names once the training data set is dummified
# save non categorical data
self.data_nonc = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(exclude=("object"))
if self.target in data.columns:
self.target_column = data[[self.target]]
else:
self.target_column = None
# # plus we will only take object data types
categorical_data = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(include=("object"))
# # now fit the training column
self.ohe.fit(categorical_data)
self.data_columns = self.ohe.get_feature_names(categorical_data.columns)
return self
def transform(self, X, y=None):
data = X.copy()
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
# only for test data
self.data_nonc = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(exclude=("object"))
# fit without target and only categorical columns
array = self.ohe.transform(
data.drop(self.target, axis=1, errors="ignore").select_dtypes(
include=("object")
)
).toarray()
data_dummies = pd.DataFrame(array, columns=self.data_columns)
data_dummies.index = self.data_nonc.index
if self.target in data.columns:
target_column = data[[self.target]]
else:
target_column = None
# now put target, numerical and categorical variables back together
data = pd.concat((target_column, self.data_nonc, data_dummies), axis=1)
del self.data_nonc
return data
else:
return data
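# Illustrative usage sketch for this class (dataframe and target name are hypothetical):
#   >>> dummify = Dummify(target="churn")
#   >>> train_t = dummify.fit_transform(train)   # object columns replaced by one-hot columns
#   >>> test_t = dummify.transform(test)         # unseen categories are ignored (handle_unknown="ignore")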
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
self.fit(data)
# fit without target and only categorical columns
array = self.ohe.transform(
data.drop(self.target, axis=1, errors="ignore").select_dtypes(
include=("object")
)
).toarray()
data_dummies = pd.DataFrame(array, columns=self.data_columns)
data_dummies.index = self.data_nonc.index
# now put target, numerical and categorical variables back together
data =
|
pd.concat((self.target_column, self.data_nonc, data_dummies), axis=1)
|
pandas.concat
|
import argparse
from predictor import *
from tools import *
from getVariantOverlap import *
import pandas as pd
import numpy as np
import sys, traceback, os, os.path
import time
def get_model_argument_parser():
class formatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
parser = argparse.ArgumentParser(description='Predict enhancer relative effects.',
formatter_class=formatter)
readable = argparse.FileType('r')
#Basic parameters
parser.add_argument('--enhancers', required=True, help="Candidate enhancer regions. Formatted as the EnhancerList.txt file produced by run.neighborhoods.py")
parser.add_argument('--genes', required=True, help="Genes to make predictions for. Formatted as the GeneList.txt file produced by run.neighborhoods.py")
parser.add_argument('--outdir', required=True, help="output directory")
parser.add_argument('--window', type=int, default=5000000, help="Make predictions for all candidate elements within this distance of the gene's TSS")
parser.add_argument('--score_column', default='ABC.Score', help="Column name of score to use for thresholding")
parser.add_argument('--threshold', type=float, required=True, default=.022, help="Threshold on ABC Score (--score_column) to call a predicted positive")
parser.add_argument('--cellType', help="Name of cell type")
parser.add_argument('--chrom_sizes', help="Chromosome sizes file")
#hic
#To do: validate params
parser.add_argument('--HiCdir', default=None, help="HiC directory")
parser.add_argument('--hic_resolution', type=int, help="HiC resolution")
parser.add_argument('--tss_hic_contribution', type=float, default=100, help="Weighting of diagonal bin of hic matrix as a percentage of the maximum of its neighboring bins")
parser.add_argument('--hic_pseudocount_distance', type=int, default=1e6, help="A pseudocount is added equal to the powerlaw fit at this distance")
parser.add_argument('--hic_type', default = 'juicebox', choices=['juicebox','bedpe'], help="format of hic files")
parser.add_argument('--hic_is_doubly_stochastic', action='store_true', help="If hic matrix is already DS, can skip this step")
#Power law
parser.add_argument('--scale_hic_using_powerlaw', action="store_true", help="Scale Hi-C values using powerlaw relationship")
parser.add_argument('--hic_gamma', type=float, default=.87, help="powerlaw exponent of hic data. Must be positive")
parser.add_argument('--hic_gamma_reference', type=float, default=.87, help="powerlaw exponent to scale to. Must be positive")
#Genes to run through model
parser.add_argument('--run_all_genes', action='store_true', help="Do not check for gene expression, make predictions for all genes")
parser.add_argument('--expression_cutoff', type=float, default=1, help="Make predictions for genes with expression higher than this value")
parser.add_argument('--promoter_activity_quantile_cutoff', type=float, default=.4, help="Quantile cutoff on promoter activity. Used to consider a gene 'expressed' in the absence of expression data")
#Output formatting
parser.add_argument('--make_all_putative', action="store_true", help="Make big file with concatenation of all genes file")
parser.add_argument('--use_hdf5', action="store_true", help="Write AllPutative file in hdf5 format instead of tab-delimited")
#Other
parser.add_argument('--tss_slop', type=int, default=500, help="Distance from tss to search for self-promoters")
parser.add_argument('--chromosomes', default="all", help="chromosomes to make predictions for. Defaults to intersection of all chromosomes in --genes and --enhancers")
parser.add_argument('--include_chrY', '-y', action='store_true', help="Make predictions on Y chromosome")
return parser
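# Illustrative invocation sketch (script name and file paths are hypothetical;
# the flags correspond to the parser defined above):
#   python predict.py \
#     --enhancers EnhancerList.txt \
#     --genes GeneList.txt \
#     --outdir ./ABC_output \
#     --threshold 0.022 \
#     --HiCdir ./hic_data --hic_resolution 5000 \
#     --scale_hic_using_powerlaw \
#     --cellType K562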
def get_predict_argument_parser():
parser = get_model_argument_parser()
return parser
def main():
parser = get_predict_argument_parser()
args = parser.parse_args()
validate_args(args)
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
write_params(args, os.path.join(args.outdir, "parameters.predict.txt"))
print("reading genes")
genes = pd.read_csv(args.genes, sep = "\t")
genes = determine_expressed_genes(genes, args.expression_cutoff, args.promoter_activity_quantile_cutoff)
genes = genes.loc[:,['chr','symbol','tss','Expression','PromoterActivityQuantile','isExpressed']]
genes.columns = ['chr','TargetGene', 'TargetGeneTSS', 'TargetGeneExpression', 'TargetGenePromoterActivityQuantile','TargetGeneIsExpressed']
print("reading enhancers")
enhancers_full = pd.read_csv(args.enhancers, sep = "\t")
#TO DO
#Think about which columns to include
enhancers = enhancers_full.loc[:,['chr','start','end','name','class','activity_base']]
#Initialize Prediction files
pred_file_full = os.path.join(args.outdir, "EnhancerPredictionsFull.txt")
pred_file_slim = os.path.join(args.outdir, "EnhancerPredictions.txt")
pred_file_bedpe = os.path.join(args.outdir, "EnhancerPredictions.bedpe")
all_pred_file_expressed = os.path.join(args.outdir, "EnhancerPredictionsAllPutative.txt.gz")
all_pred_file_nonexpressed = os.path.join(args.outdir, "EnhancerPredictionsAllPutativeNonExpressedGenes.txt.gz")
variant_overlap_file = os.path.join(args.outdir, "EnhancerPredictionsAllPutative.ForVariantOverlap.shrunk150bp.txt.gz")
all_putative_list = []
#Make predictions
if args.chromosomes == "all":
chromosomes = set(genes['chr']).intersection(set(enhancers['chr']))
if not args.include_chrY:
chromosomes.discard('chrY')
else:
chromosomes = args.chromosomes.split(",")
for chromosome in chromosomes:
print('Making predictions for chromosome: {}'.format(chromosome))
t = time.time()
this_enh = enhancers.loc[enhancers['chr'] == chromosome, :].copy()
this_genes = genes.loc[genes['chr'] == chromosome, :].copy()
this_chr = make_predictions(chromosome, this_enh, this_genes, args)
all_putative_list.append(this_chr)
print('Completed chromosome: {}. Elapsed time: {} \n'.format(chromosome, time.time() - t))
# Subset predictions
print("Writing output files...")
all_putative =
|
pd.concat(all_putative_list)
|
pandas.concat
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
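# Illustrative sketch of the helper above (frame is hypothetical):
#   >>> list(_generate_indices(DataFrame(np.zeros((2, 2))), values=True))
#   [(0, 0), (0, 1), (1, 0), (1, 1)]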
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
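# Illustrative sketch of _axify (frame is hypothetical): it expands a key into a
# full tuple indexer with null slices on the other axes, e.g.
#   >>> df = DataFrame(np.arange(12).reshape(3, 4))
#   >>> _axify(df, [0, 2], 1)
#   (slice(None, None, None), [0, 2])
#   >>> df.iloc[_axify(df, [0, 2], 1)]   # selects columns 0 and 2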
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if the exception type is in fails, then ok; otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# these now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
import numpy as np
from torch.utils.data import Dataset
from src.models.lang_model.w2v_averager_model import W2vAveragerModel
from sklearn.preprocessing import StandardScaler
import datetime
import pandas as pd
from copy import deepcopy
# import matplotlib.pyplot as plt
"""
Data import functions
"""
def make_dfs(paths):
df = []
for path in paths:
df.append(pd.read_json(path))
df = pd.concat(df)
return df
def UUID_to_int(uuid_list):
map_dict = {}
for i, uuid in enumerate(uuid_list):
map_dict[uuid] = i
return map_dict
def map_id(row, col_name, map_dict):
val = row[col_name]
return map_dict[val]
def normalized_seconds(date_series):
"""Given a series of strings in the format
year-month-day, return a series of floats which
are normalized (mean 0 and sd 1) unix time"""
scaler = StandardScaler()
date_string_list = list(date_series)
y_m_d_list = [[int(x) for x in date.split('-')] for date in date_string_list]
    unix_times = [datetime.datetime(y, m, d).timestamp() for y, m, d in y_m_d_list]  # .timestamp() is portable, unlike strftime("%s")
reshaped_u_ts = np.array(unix_times).reshape(-1, 1).astype('float64')
np_times = scaler.fit_transform(reshaped_u_ts)
return pd.DataFrame(np_times, columns=['Date'])
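# Illustrative usage sketch (hypothetical values, not part of the original file):
# normalized_seconds turns 'YYYY-MM-DD' strings into z-scored unix timestamps,
# so the returned 'Date' column has mean ~0 and unit variance.
def _demo_normalized_seconds():
    dates = pd.Series(['2019-01-01', '2019-06-01', '2019-12-31'])
    return normalized_seconds(dates)  # one float64 column named 'Date'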
def top_k_one_hot(series, k):
"""Given a pandas series of categorical labels,
return a one-hot encoding of the top k
most-frequent labels, with all others under an
'other' label."""
series = series.copy()
counts = series.value_counts()
mask = series.isin(list(counts.iloc[:k].index))
series[~mask] = 'Other'
return pd.get_dummies(series)
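# Illustrative usage sketch (toy data, not part of the original file): with k=2
# only the two most frequent labels keep their own indicator columns and the
# lone 'c' is folded into 'Other'.
def _demo_top_k_one_hot():
    s = pd.Series(['a', 'a', 'a', 'b', 'b', 'c'])
    return top_k_one_hot(s, k=2)  # columns: 'Other', 'a', 'b'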
def series_to_w2v(series, averager, prefix):
"""Given a pandas series and a W2vAveragerModel object,
return a dataframe with columns prefix_n
where n goes up to the size of the returned embedding"""
w2v_tensor = averager(list(series))
embed_size = w2v_tensor.data.shape[1]
col_names = ['{}_{}'.format(prefix, n) for n in range(embed_size)]
return pd.DataFrame(w2v_tensor.data.numpy(), columns=col_names)
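# Illustrative usage sketch (assumes `averager` is an already constructed
# W2vAveragerModel; its constructor arguments are project-specific and not
# shown here):
def _demo_series_to_w2v(averager):
    s = pd.Series(['hello world', 'foo bar'])
    return series_to_w2v(s, averager, prefix='w2v')  # columns: w2v_0 ... w2v_{embed_size-1}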
def munge_metadata(df):
"""Given a dataframe with metadata,
return a one-hot encoded version of that
dataframe"""
# One-hot encoding of the parties, states
new_df = df.copy()
if 'party' in df.columns:
parties =
|
pd.get_dummies(new_df['party'])
|
pandas.get_dummies
|
import sqlite3
import pandas as pd
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from bokeh.plotting import figure, ColumnDataSource, show
from bokeh.models import HoverTool
import numpy as np
import scipy as sp
from pylab import plot,show
from numpy import vstack,array
from numpy.random import rand
from scipy.cluster.vq import kmeans,vq
from mpl_toolkits.mplot3d import Axes3D
from bokeh.io import output_notebook
import matplotlib.pyplot as plt
import seaborn as sns
database = './data/database.sqlite'
conn = sqlite3.connect(database)
cur = conn.cursor()
query = "SELECT name FROM sqlite_master WHERE type='table';"
pd.read_sql(query, conn)
query = "SELECT * FROM Player;"
a = pd.read_sql(query, conn)
a.head()
query = "SELECT * FROM Player_Attributes;"
a = pd.read_sql(query, conn)
a.head()
query = """SELECT * FROM Player_Attributes a
INNER JOIN (SELECT player_name, player_api_id AS p_id FROM Player) b ON a.player_api_id = b.p_id;"""
drop_cols = ['id','date','preferred_foot',
'attacking_work_rate','defensive_work_rate']
players = pd.read_sql(query, conn)
players['date'] =
|
pd.to_datetime(players['date'])
|
pandas.to_datetime
|
"""The main module handling the simulation"""
import copy
import datetime
import logging
import os
import pickle
import queue
import random
import sys
import threading
import warnings
from functools import lru_cache
from pprint import pformat # TODO set some defaults for width/etc with partial?
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pap
import tqdm
from ..numerical_libs import enable_cupy, reimport_numerical_libs, xp, xp_ivp
from ..util.distributions import approx_mPERT_sample, truncnorm
from ..util.util import TqdmLoggingHandler, _banner
from .arg_parser_model import parser
from .estimation import estimate_Rt
from .exceptions import SimulationException
from .graph import buckyGraphData
from .mc_instance import buckyMCInstance
from .npi import get_npi_params
from .parameters import buckyParams
from .rhs import RHS_func
from .state import buckyState
# suppress pandas warning caused by pyarrow
warnings.simplefilter(action="ignore", category=FutureWarning)
# TODO we allow a lot of division by 0 and then check for NaNs later; we should probably refactor that
warnings.simplefilter(action="ignore", category=RuntimeWarning)
@lru_cache(maxsize=None)
def get_runid(): # TODO move to util and rename to timeid or something
"""Gets a UUID based of the current datatime and caches it"""
dt_now = datetime.datetime.now()
return str(dt_now).replace(" ", "__").replace(":", "_").split(".")[0]
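# Illustrative sketch (not part of the original module): get_runid() yields a
# filesystem-safe timestamp such as '2021-03-15__14_30_05', and lru_cache makes
# every later call in the same process return the identical cached string.
def _demo_get_runid():
    first = get_runid()
    assert get_runid() is first  # cached for the lifetime of the process
    return first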
def frac_last_n_vals(arr, n, axis=0, offset=0):  # TODO currently assumes the values come from the end of the array; move to util
"""Return the last n values along an axis of an array; if n is a float, include the fractional amount of the int(n)-1 element"""
int_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(n + offset), -int(xp.ceil(offset)) or None)]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = arr[int_slice_ind]
# handle fractional element before the standard slice
if (n + offset) % 1:
frac_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(n + offset + 1), -int(n + offset))]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = xp.concatenate((((n + offset) % 1) * arr[frac_slice_ind], ret), axis=axis)
# handle fractional element after the standard slice
if offset % 1:
frac_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(offset + 1), -int(offset) or None)]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = xp.concatenate((ret, (1.0 - (offset % 1)) * arr[frac_slice_ind]), axis=axis)
return ret
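# Illustrative sketch (not part of the original module; assumes xp is bound to
# a numpy-compatible backend): "the last 2.5 values" of [0, 1, ..., 9] are the
# full 9 and 8 plus half the weight of 7.
def _demo_frac_last_n_vals():
    arr = np.arange(10.0)
    return frac_last_n_vals(arr, 2.5)  # -> array([3.5, 8., 9.])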
class buckyModelCovid:
"""Class that handles one full simulation (both time integration and managing MC states)"""
def __init__(
self,
debug=False,
sparse_aij=False,
t_max=None,
graph_file=None,
par_file=None,
npi_file=None,
disable_npi=False,
reject_runs=False,
):
"""Initialize the class, do some bookkeeping and read in the input graph"""
self.debug = debug
self.sparse = sparse_aij # we can default to none and autodetect
        # w/ override (maybe when #adm2 > 5k and some sparsity criteria?)
# Integrator params
self.t_max = t_max
self.run_id = get_runid()
logging.info(f"Run ID: {self.run_id}")
self.npi_file = npi_file
self.disable_npi = disable_npi
self.reject_runs = reject_runs
self.output_dates = None
# COVID/model params from par file
self.bucky_params = buckyParams(par_file)
self.consts = self.bucky_params.consts
self.dists = self.bucky_params.dists
self.g_data = self.load_graph(graph_file)
def update_params(self, update_dict):
self.bucky_params.update_params(update_dict)
self.consts = self.bucky_params.consts
self.dists = self.bucky_params.dists
def load_graph(self, graph_file):
"""Load the graph data and calculate all the variables that are static across MC runs"""
# TODO refactor to just have this return g_data
logging.info("loading graph")
with open(graph_file, "rb") as f:
G = pickle.load(f) # nosec
# Load data from input graph
        # TODO we should go through and replace lots of the math using self.g_data.* with functions in buckyGraphData
g_data = buckyGraphData(G, self.sparse)
# Make contact mats sym and normalized
self.contact_mats = G.graph["contact_mats"]
if self.debug:
logging.debug(f"graph contact mats: {G.graph['contact_mats'].keys()}")
for mat in self.contact_mats:
c_mat = xp.array(self.contact_mats[mat])
c_mat = (c_mat + c_mat.T) / 2.0
self.contact_mats[mat] = c_mat
        # remove all_locations so we can sum over them ourselves
if "all_locations" in self.contact_mats:
del self.contact_mats["all_locations"]
# Remove unknown contact mats
valid_contact_mats = ["home", "work", "other_locations", "school"]
self.contact_mats = {k: v for k, v in self.contact_mats.items() if k in valid_contact_mats}
self.Cij = xp.vstack([self.contact_mats[k][None, ...] for k in sorted(self.contact_mats)])
# Get stratified population (and total)
self.Nij = g_data.Nij
self.Nj = g_data.Nj
self.n_age_grps = self.Nij.shape[0] # TODO factor out
self.init_date = datetime.date.fromisoformat(G.graph["start_date"])
self.base_mc_instance = buckyMCInstance(self.init_date, self.t_max, self.Nij, self.Cij)
# fill in npi_params either from file or as ones
self.npi_params = get_npi_params(g_data, self.init_date, self.t_max, self.npi_file, self.disable_npi)
if self.npi_params["npi_active"]:
self.base_mc_instance.add_npi(self.npi_params)
self.adm0_cfr_reported = None
self.adm1_cfr_reported = None
self.adm2_cfr_reported = None
# If HHS hospitalization data is on the graph, use it to rescale initial H counts and CHR
# self.rescale_chr = "hhs_data" in G.graph
if self.consts.rescale_chr:
self.adm1_current_hosp = xp.zeros((g_data.max_adm1 + 1,), dtype=float)
# TODO move hosp data to the graph nodes and handle it with graph.py the way cases/deaths are
hhs_data = G.graph["hhs_data"].reset_index()
hhs_data["date"] = pd.to_datetime(hhs_data["date"])
hhs_data = (
hhs_data.set_index("date")
.sort_index()
.groupby("adm1")
.rolling(7)
.mean()
.drop(columns="adm1")
.reset_index()
)
hhs_curr_data = hhs_data.loc[hhs_data.date == pd.Timestamp(self.init_date)]
hhs_curr_data = hhs_curr_data.set_index("adm1").sort_index()
tot_hosps = (
hhs_curr_data.total_adult_patients_hospitalized_confirmed_covid
+ hhs_curr_data.total_pediatric_patients_hospitalized_confirmed_covid
)
self.adm1_current_hosp[tot_hosps.index.to_numpy()] = tot_hosps.to_numpy()
if self.debug:
logging.debug("Current hospitalizations: " + pformat(self.adm1_current_hosp))
# Estimate the recent CFR during the period covered by the historical data
cfr_delay = 25 # 14 # TODO This should come from CDC and Nij
n_cfr = 14
last_cases = (
g_data.rolling_cum_cases[-cfr_delay - n_cfr : -cfr_delay] - g_data.rolling_cum_cases[-cfr_delay - n_cfr - 1]
)
last_deaths = g_data.rolling_cum_deaths[-n_cfr:] - g_data.rolling_cum_deaths[-n_cfr - 1]
adm1_cases = g_data.sum_adm1(last_cases.T)
adm1_deaths = g_data.sum_adm1(last_deaths.T)
negative_mask = (adm1_deaths < 0.0) | (adm1_cases < 0.0)
adm1_cfr = adm1_deaths / adm1_cases
adm1_cfr[negative_mask] = xp.nan
        # take the NaN-aware median over the n-day window
self.adm1_current_cfr = xp.nanmedian(adm1_cfr, axis=1)
# Estimate recent CHR
if self.consts.rescale_chr:
chr_delay = 20 # TODO This should come from I_TO_H_TIME and Nij as a float (it's ~5.8)
n_chr = 7
tmp = hhs_data.loc[hhs_data.date > pd.Timestamp(self.init_date - datetime.timedelta(days=n_chr))]
tmp = tmp.loc[tmp.date <= pd.Timestamp(self.init_date)]
tmp = tmp.set_index(["adm1", "date"]).sort_index()
tmp = (
tmp.previous_day_admission_adult_covid_confirmed + tmp.previous_day_admission_pediatric_covid_confirmed
)
cum_hosps = xp.zeros((adm1_cfr.shape[0], n_chr))
tmp = tmp.unstack()
tmp_data = tmp.T.cumsum().to_numpy()
tmp_ind = tmp.index.to_numpy()
cum_hosps[tmp_ind] = tmp_data.T
last_cases = (
g_data.rolling_cum_cases[-chr_delay - n_chr : -chr_delay]
- g_data.rolling_cum_cases[-chr_delay - n_chr - 1]
)
adm1_cases = g_data.sum_adm1(last_cases.T)
adm1_hosps = cum_hosps # g_data.sum_adm1(last_hosps.T)
adm1_chr = adm1_hosps / adm1_cases
# take mean over n days
self.adm1_current_chr = xp.mean(adm1_chr, axis=1)
# self.adm1_current_chr = self.calc_lagged_rate(g_data.adm1_cum_case_hist, cum_hosps.T, chr_delay, n_chr)
if self.debug:
logging.debug("Current CFR: " + pformat(self.adm1_current_cfr))
return g_data
def reset(self, seed=None, params=None):
"""Reset the state of the model and generate new inital data from a new random seed"""
# TODO we should refactor reset of the compartments to be real pop numbers then /Nij at the end
if seed is not None:
random.seed(int(seed))
np.random.seed(seed)
xp.random.seed(seed)
# reroll model params if we're doing that kind of thing
self.g_data.Aij.perturb(self.consts.reroll_variance)
self.params = self.bucky_params.generate_params()
if params is not None:
self.params = copy.deepcopy(params)
if self.debug:
logging.debug("params: " + pformat(self.params, width=120))
for k in self.params:
if type(self.params[k]).__module__ == np.__name__:
self.params[k] = xp.asarray(self.params[k])
# TODO consolidate all the broadcast_to calls
self.params.H = xp.broadcast_to(self.params.H[:, None], self.Nij.shape)
self.params.F = xp.broadcast_to(self.params.F[:, None], self.Nij.shape)
if self.consts.rescale_chr:
# TODO this needs to be cleaned up BAD
adm1_Ni = self.g_data.adm1_Nij
adm1_N = self.g_data.adm1_Nj
# estimate adm2 expected CFR weighted by local age demo
tmp = self.params.F[:, 0][..., None] * self.g_data.adm1_Nij / self.g_data.adm1_Nj
adm1_F = xp.sum(tmp, axis=0)
# get ratio of actual CFR to expected CFR
adm1_F_fac = self.adm1_current_cfr / adm1_F
adm0_F_fac = xp.nanmean(adm1_N * adm1_F_fac) / xp.sum(adm1_N)
adm1_F_fac[xp.isnan(adm1_F_fac)] = adm0_F_fac
F_RR_fac = truncnorm(1.0, self.dists.F_RR_var, size=adm1_F_fac.size, a_min=1e-6)
if self.debug:
logging.debug("adm1 cfr rescaling factor: " + pformat(adm1_F_fac))
self.params.F = self.params.F * F_RR_fac[self.g_data.adm1_id] * adm1_F_fac[self.g_data.adm1_id]
self.params.F = xp.clip(self.params.F, a_min=1.0e-10, a_max=1.0)
adm1_Hi = self.g_data.sum_adm1((self.params.H * self.Nij).T).T
adm1_Hi = adm1_Hi / adm1_Ni
adm1_H = xp.nanmean(adm1_Hi, axis=0)
adm1_H_fac = self.adm1_current_chr / adm1_H
adm0_H_fac = xp.nanmean(adm1_N * adm1_H_fac) / xp.sum(adm1_N)
adm1_H_fac[xp.isnan(adm1_H_fac)] = adm0_H_fac
H_RR_fac = truncnorm(1.0, self.dists.H_RR_var, size=adm1_H_fac.size, a_min=1e-6)
adm1_H_fac = adm1_H_fac * H_RR_fac
# adm1_H_fac = xp.clip(adm1_H_fac, a_min=0.1, a_max=10.0) # prevent extreme values
if self.debug:
logging.debug("adm1 chr rescaling factor: " + pformat(adm1_H_fac))
self.params.H = self.params.H * adm1_H_fac[self.g_data.adm1_id]
self.params.H = xp.clip(self.params.H, a_min=self.params.F, a_max=1.0)
# crr_days_needed = max( #TODO this depends on all the Td params, and D_REPORT_TIME...
case_reporting = self.estimate_reporting(
self.g_data,
self.params,
cfr=self.params.F,
# case_lag=14,
days_back=25,
min_deaths=self.consts.case_reporting_min_deaths,
)
self.case_reporting = approx_mPERT_sample( # TODO these facs should go in param file
mu=xp.clip(case_reporting, a_min=0.05, a_max=0.95),
a=xp.clip(0.7 * case_reporting, a_min=0.01, a_max=0.9),
b=xp.clip(1.3 * case_reporting, a_min=0.1, a_max=1.0),
gamma=50.0,
)
mean_case_reporting = xp.nanmean(self.case_reporting[-self.consts.case_reporting_N_historical_days :], axis=0)
self.params["CASE_REPORT"] = mean_case_reporting
self.params["THETA"] = xp.broadcast_to(
self.params["THETA"][:, None], self.Nij.shape
) # TODO move all the broadcast_to's to one place, they're all over reset()
self.params["GAMMA_H"] = xp.broadcast_to(self.params["GAMMA_H"][:, None], self.Nij.shape)
self.params["F_eff"] = xp.clip(self.params["F"] / self.params["H"], 0.0, 1.0)
# state building init state vector (self.y)
yy = buckyState(self.consts, self.Nij)
if self.debug:
logging.debug("case init")
Ti = self.params.Ti
current_I = xp.sum(frac_last_n_vals(self.g_data.rolling_inc_cases, Ti, axis=0), axis=0)
current_I[xp.isnan(current_I)] = 0.0
current_I[current_I < 0.0] = 0.0
current_I *= 1.0 / (self.params["CASE_REPORT"])
# Roll some random factors for the init compartment values
R_fac = approx_mPERT_sample(**(self.dists.R_fac_dist))
E_fac = approx_mPERT_sample(**(self.dists.E_fac_dist))
H_fac = approx_mPERT_sample(**(self.dists.H_fac_dist))
age_dist_fac = self.Nij / xp.sum(self.Nij, axis=0, keepdims=True)
I_init = E_fac * current_I[None, :] * age_dist_fac / self.Nij # / self.n_age_grps
D_init = self.g_data.cum_death_hist[-1][None, :] * age_dist_fac / self.Nij # / self.n_age_grps
recovered_init = (self.g_data.cum_case_hist[-1] / self.params["SYM_FRAC"]) * R_fac
R_init = (
(recovered_init) * age_dist_fac / self.Nij - D_init - I_init / self.params["SYM_FRAC"]
) # Rh is factored in later
Rt = estimate_Rt(self.g_data, self.params, 7, self.case_reporting)
Rt_fac = approx_mPERT_sample(**(self.dists.Rt_dist))
Rt = Rt * Rt_fac
self.params["R0"] = Rt
self.params["BETA"] = Rt * self.params["GAMMA"] / self.g_data.Aij.diag
exp_frac = (
E_fac
* xp.ones(I_init.shape[-1])
* (self.params.R0)
* self.params.GAMMA
/ self.params.SIGMA
/ (1.0 - R_init)
/ self.params["SYM_FRAC"]
)
yy.I = (1.0 - self.params.H) * I_init / yy.Im
yy.Ic = self.params.H * I_init / yy.Im
# TODO this is an estimate, we should rescale it to the real data if we have it
rh_fac = 1.0 # .4
yy.Rh = self.params.H * I_init / yy.Rhn
if self.consts.rescale_chr:
adm1_hosp = xp.zeros((self.g_data.max_adm1 + 1,), dtype=float)
xp.scatter_add(adm1_hosp, self.g_data.adm1_id, xp.sum(yy.Rh * self.Nij, axis=(0, 1)))
adm2_hosp_frac = (self.adm1_current_hosp / adm1_hosp)[self.g_data.adm1_id]
adm0_hosp_frac = xp.nansum(self.adm1_current_hosp) / xp.nansum(adm1_hosp)
adm2_hosp_frac[xp.isnan(adm2_hosp_frac) | (adm2_hosp_frac == 0.0)] = adm0_hosp_frac
adm2_hosp_frac = xp.sqrt(adm2_hosp_frac * adm0_hosp_frac)
scaling_F = F_RR_fac[self.g_data.adm1_id] * self.consts.F_scaling / H_fac
scaling_H = adm2_hosp_frac * H_fac
self.params["F"] = xp.clip(self.params["F"] * scaling_F, 0.0, 1.0)
self.params["H"] = xp.clip(self.params["H"] * scaling_H, self.params["F"], 1.0) / 1.2
self.params["F_eff"] = xp.clip(self.params["F"] / self.params["H"], 0.0, 1.0)
# TODO rename F_eff to HFR
adm2_chr_delay = xp.sum(self.params["I_TO_H_TIME"][:, None] * self.g_data.Nij / self.g_data.Nj, axis=0)
adm2_chr_delay_int = adm2_chr_delay.astype(int) # TODO temp, this should be a distribution of floats
adm2_chr_delay_mod = adm2_chr_delay % 1
inc_case_h_delay = (1.0 - adm2_chr_delay_mod) * xp.take_along_axis(
self.g_data.rolling_inc_cases, -adm2_chr_delay_int[None, :], axis=0
)[0] + adm2_chr_delay_mod * xp.take_along_axis(
self.g_data.rolling_inc_cases, -adm2_chr_delay_int[None, :] - 1, axis=0
)[
0
]
inc_case_h_delay[(inc_case_h_delay > 0.0) & (inc_case_h_delay < 1.0)] = 1.0
inc_case_h_delay[inc_case_h_delay < 0.0] = 0.0
adm2_chr = xp.sum(self.params["H"] * self.g_data.Nij / self.g_data.Nj, axis=0)
tmp = xp.sum(self.params.H * I_init / yy.Im * self.g_data.Nij, axis=0) / 3.0 # 1/3 is mean sigma
tmp2 = inc_case_h_delay * adm2_chr # * 3.0 # 3 == mean sigma, these should be read from base_params
ic_fac = tmp2 / tmp
ic_fac[~xp.isfinite(ic_fac)] = xp.nanmean(ic_fac[xp.isfinite(ic_fac)])
yy.I = (1.0 - self.params.H) * I_init / yy.Im
yy.Ic = ic_fac * self.params.H * I_init / yy.Im
yy.Rh = (
rh_fac
* self.params.H
* I_init
/ yy.Rhn
# * 1.15 # fit to runs, we should be able to calculate this somehow...
)
R_init -= xp.sum(yy.Rh, axis=0)
yy.Ia = self.params.ASYM_FRAC / self.params.SYM_FRAC * I_init / yy.Im
        yy.E = exp_frac[None, :] * I_init / yy.En  # this should be calculable from Rt and the time before symptom onset
yy.R = xp.clip(R_init, a_min=0.0, a_max=None)
yy.D = D_init
# TMP
mask = xp.sum(yy.N, axis=0) > 1.0
yy.state[:, mask] /= xp.sum(yy.N, axis=0)[mask]
yy.init_S()
# init the bin we're using to track incident cases
# (it's filled with cumulatives until we diff it later)
# TODO should this come from the rolling hist?
yy.incC = xp.clip(self.g_data.cum_case_hist[-1][None, :], a_min=0.0, a_max=None) * age_dist_fac / self.Nij
self.y = yy
# Sanity check state vector
self.y.validate_state()
if self.debug:
logging.debug("done reset()")
# return y
# @staticmethod need to move the caching out b/c its in the self namespace
def estimate_reporting(self, g_data, params, cfr, days_back=14, case_lag=None, min_deaths=100.0):
"""Estimate the case reporting rate based off observed vs. expected CFR"""
if case_lag is None:
adm0_cfr_by_age = xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0)
adm0_cfr_total = xp.sum(
xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0),
axis=0,
)
case_lag = xp.sum(params["D_REPORT_TIME"] * adm0_cfr_by_age / adm0_cfr_total, axis=0)
case_lag_int = int(case_lag)
recent_cum_cases = g_data.rolling_cum_cases - g_data.rolling_cum_cases[0]
recent_cum_deaths = g_data.rolling_cum_deaths - g_data.rolling_cum_deaths[0]
case_lag_frac = case_lag % 1 # TODO replace with util function for the indexing
cases_lagged = frac_last_n_vals(recent_cum_cases, days_back + case_lag_frac, offset=case_lag_int)
if case_lag_frac:
cases_lagged = cases_lagged[0] + cases_lagged[1:]
# adm0
adm0_cfr_param = xp.sum(xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0), axis=0)
if self.adm0_cfr_reported is None:
self.adm0_cfr_reported = xp.sum(recent_cum_deaths[-days_back:], axis=1) / xp.sum(cases_lagged, axis=1)
adm0_case_report = adm0_cfr_param / self.adm0_cfr_reported
if self.debug:
logging.debug("Adm0 case reporting rate: " + pformat(adm0_case_report))
if xp.any(~xp.isfinite(adm0_case_report)):
if self.debug:
logging.debug("adm0 case report not finite")
logging.debug(adm0_cfr_param)
logging.debug(self.adm0_cfr_reported)
raise SimulationException
case_report = xp.repeat(adm0_case_report[:, None], cases_lagged.shape[-1], axis=1)
# adm1
adm1_cfr_param = xp.zeros((g_data.max_adm1 + 1,), dtype=float)
adm1_totpop = g_data.adm1_Nj # xp.zeros((self.g_data.max_adm1 + 1,), dtype=float)
tmp_adm1_cfr = xp.sum(cfr * g_data.Nij, axis=0)
xp.scatter_add(adm1_cfr_param, g_data.adm1_id, tmp_adm1_cfr)
# xp.scatter_add(adm1_totpop, self.g_data.adm1_id, self.Nj)
adm1_cfr_param /= adm1_totpop
# adm1_cfr_reported is const, only calc it once and cache it
if self.adm1_cfr_reported is None:
self.adm1_deaths_reported = xp.zeros((g_data.max_adm1 + 1, days_back), dtype=float)
adm1_lagged_cases = xp.zeros((g_data.max_adm1 + 1, days_back), dtype=float)
xp.scatter_add(
self.adm1_deaths_reported,
g_data.adm1_id,
recent_cum_deaths[-days_back:].T,
)
xp.scatter_add(adm1_lagged_cases, g_data.adm1_id, cases_lagged.T)
self.adm1_cfr_reported = self.adm1_deaths_reported / adm1_lagged_cases
adm1_case_report = (adm1_cfr_param[:, None] / self.adm1_cfr_reported)[g_data.adm1_id].T
valid_mask = (self.adm1_deaths_reported > min_deaths)[g_data.adm1_id].T & xp.isfinite(adm1_case_report)
case_report[valid_mask] = adm1_case_report[valid_mask]
# adm2
adm2_cfr_param = xp.sum(cfr * (g_data.Nij / g_data.Nj), axis=0)
if self.adm2_cfr_reported is None:
self.adm2_cfr_reported = recent_cum_deaths[-days_back:] / cases_lagged
adm2_case_report = adm2_cfr_param / self.adm2_cfr_reported
valid_adm2_cr = xp.isfinite(adm2_case_report) & (recent_cum_deaths[-days_back:] > min_deaths)
case_report[valid_adm2_cr] = adm2_case_report[valid_adm2_cr]
return case_report
def run_once(self, seed=None):
"""Perform one complete run of the simulation"""
# rename to integrate or something? it also resets...
# reset everything
logging.debug("Resetting state")
self.reset(seed=seed)
logging.debug("Done reset")
self.base_mc_instance.epi_params = self.params
self.base_mc_instance.state = self.y
self.base_mc_instance.Aij = self.g_data.Aij.A
self.base_mc_instance.rhs = RHS_func
self.base_mc_instance.dy = self.y.zeros_like()
# TODO this logic needs to go somewhere else (its rescaling beta to account for S/N term)
# TODO R0 need to be changed before reset()...
S_eff = self.base_mc_instance.S_eff(0, self.base_mc_instance.state)
adm2_S_eff = xp.sum(S_eff * self.g_data.Nij / self.g_data.Nj, axis=0)
adm2_beta_scale = xp.clip(1.0 / (adm2_S_eff + 1e-10), a_min=1.0, a_max=5.0)
self.base_mc_instance.epi_params["R0"] = self.base_mc_instance.epi_params["R0"] * adm2_beta_scale
self.base_mc_instance.epi_params["BETA"] = self.base_mc_instance.epi_params["BETA"] * adm2_beta_scale
adm2_E_tot = xp.sum(self.y.E * self.g_data.Nij / self.g_data.Nj, axis=(0, 1))
adm2_new_E_tot = adm2_beta_scale * adm2_E_tot
S_dist = S_eff / (xp.sum(S_eff, axis=0) + 1e-10)
new_E = xp.tile(
(S_dist * adm2_new_E_tot / self.g_data.Nij * self.g_data.Nj / self.params.consts["En"])[None, ...],
(xp.to_cpu(self.params.consts["En"]), 1, 1),
)
new_S = self.y.S - xp.sum(new_E - self.y.E, axis=0)
self.base_mc_instance.state.E = new_E
self.base_mc_instance.state.S = new_S
# do integration
logging.debug("Starting integration")
sol = xp_ivp.solve_ivp(
# self.RHS_func,
# y0=self.y.state.ravel(),
# args=(
# #self.g_data.Aij.A,
# self.base_mc_instance,
# #self.base_mc_instance.state,
# ),
**self.base_mc_instance.integrator_args
)
logging.debug("Done integration")
return sol
def run_multiple(self, n_mc, base_seed=42, out_columns=None):
"""Perform multiple monte carlos and return their postprocessed results"""
seed_seq = np.random.SeedSequence(base_seed)
success = 0
ret = []
pbar = tqdm.tqdm(total=n_mc, desc="Performing Monte Carlos", dynamic_ncols=True)
while success < n_mc:
mc_seed = seed_seq.spawn(1)[0].generate_state(1)[0] # inc spawn key then grab next seed
pbar.set_postfix_str(
"seed=" + str(mc_seed),
refresh=True,
)
try:
with xp.optimize_kernels():
sol = self.run_once(seed=mc_seed)
df_data = self.postprocess_run(sol, mc_seed, out_columns)
ret.append(df_data)
success += 1
pbar.update(1)
except SimulationException:
pass
pbar.close()
return ret
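    # Usage sketch (hypothetical file name, illustrative only):
    #   model = buckyModelCovid(t_max=60, graph_file="us_graph.p")
    #   mc_runs = model.run_multiple(n_mc=100, base_seed=42)
    # Each element of mc_runs is the postprocessed output of one Monte Carlo
    # realization.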
# TODO Move this to a class thats like run_parser or something (that caches all the info it needs like Nij, and manages the write thread/queue)
# Also give it methods like to_dlpack, to_pytorch, etc
def save_run(self, sol, base_filename, seed, output_queue):
"""Postprocess and write to disk the output of run_once"""
df_data = self.postprocess_run(sol, seed)
# flatten the shape
for c in df_data:
df_data[c] = df_data[c].ravel()
# push the data off to the write thread
data_folder = os.path.join(base_filename, "data")
output_queue.put((data_folder, df_data))
metadata_folder = os.path.join(base_filename, "metadata")
if not os.path.exists(metadata_folder):
os.mkdir(metadata_folder)
# write dates
uniq_dates =
|
pd.Series(self.output_dates)
|
pandas.Series
|
# Function 0
def cleaning_func_0(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
return loan
#=============
# Function 1
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
return loan
#=============
# Function 2
def cleaning_func_2(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
return loan
#=============
# Function 3
def cleaning_func_3(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
return loan
#=============
# Function 4
def cleaning_func_4(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
return loan
#=============
# Function 5
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
return loan
#=============
# Function 6
def cleaning_func_6(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
return loan
#=============
# Function 7
def cleaning_func_7(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
return loan
#=============
# Function 8
def cleaning_func_8(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
return loan
#=============
# Function 9
def cleaning_func_9(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
return loan
#=============
# Function 10
def cleaning_func_10(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
return loan
#=============
# Function 11
def cleaning_func_11(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
return loan
#=============
# Function 12
def cleaning_func_12(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
return loan
#=============
# Function 13
def cleaning_func_13(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
return loan
#=============
# Function 14
def cleaning_func_14(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
return loan
#=============
# Function 15
def cleaning_func_15(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
return loan
#=============
# Function 16
def cleaning_func_0(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
return loan
#=============
# Function 17
def cleaning_func_1(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_mo'] = loan.issue_d.str[slice(0, 3, None)]
return loan
#=============
# Function 18
def cleaning_func_2(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_year'] = loan.issue_d.str[slice(4, None, None)]
return loan
#=============
# Function 19
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['bad_loan'] = 0
return data
#=============
# Function 20
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
bad_indicators = ['Charged Off ', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Default Receiver', 'Late (16-30 days)', 'Late (31-120 days)']
data.loc[(data.loan_status.isin(bad_indicators), 'bad_loan')] = 1
return data
#=============
# Function 21
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
return data
#=============
# Function 22
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['month'] = data['issue_dt'].dt.month
return data
#=============
# Function 23
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['year'] = data['issue_dt'].dt.year
return data
#=============
# Function 24
def cleaning_func_0(loans):
# core cleaning code
import pandas as pd
date = ['issue_d', 'last_pymnt_d']
cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt', 'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
# loans = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=date, usecols=cols, infer_datetime_format=True)
latest = loans['issue_d'].max()
finished_bool = (((loans['issue_d'] < (latest - pd.DateOffset(years=3))) & (loans['term'] == ' 36 months')) | ((loans['issue_d'] < (latest - pd.DateOffset(years=5))) & (loans['term'] == ' 60 months')))
finished_loans = loans.loc[finished_bool]
finished_loans['roi'] = (((finished_loans.total_pymnt / finished_loans.loan_amnt) - 1) * 100)
return finished_loans
#=============
# Function 25
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
badLoan = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)', 'In Grace Period', 'Does not meet the credit policy. Status:Charged Off']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
df['isBad'] = [(1 if (x in badLoan) else 0) for x in df.loan_status]
return df
#=============
# Function 26
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'Num_Loans']
return perStatedf
#=============
# Function 27
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
return df.groupby('addr_state', as_index=False).count()
#=============
# Function 28
def cleaning_func_6(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'loan_amt']
return perStatedf
#=============
# Function 29
def cleaning_func_8(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf.columns = ['State', 'badLoans']
return perStatedf
#=============
# Function 30
def cleaning_func_10(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
perStatedf.columns = ['State', 'totalLoans']
return perStatedf
#=============
# Function 31
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 32
def cleaning_func_15(df):
    # core cleaning code
    import pandas as pd
    # df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
    perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
    # name the merge key and count column so the merge and per-capita math below work
    perStatedf.columns = ['State', 'Num_Loans']
    statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
    statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
    statePopdf.columns = ['State', 'Pop']
    perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
    perStatedf['PerCaptia'] = (perStatedf.Num_Loans / perStatedf.Pop)
    return perStatedf
#=============
# Function 33
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
    statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 34
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
    statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return statePopdf
#=============
# Function 35
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
    statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf =
|
pd.DataFrame.from_dict(statePop, orient='index')
|
pandas.DataFrame.from_dict
|
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
precip_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_Precip_data.csv')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
# path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] =
|
pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
|
pandas.to_datetime
|
import rdflib
from datetime import datetime
from nanopub import Nanopublication
import logging
import sys
import pandas as pd
import configparser
import hashlib
from .autonomic.update_change_service import UpdateChangeService
from whyis.namespace import whyis, prov, sio
class Interpreter(UpdateChangeService):
kb = ":"
cb_fn = None
timeline_fn = None
data_fn = None
prefix_fn = "prefixes.txt"
prefixes = {}
studyRef = None
unit_code_list = []
unit_uri_list = []
unit_label_list = []
explicit_entry_list = []
virtual_entry_list = []
explicit_entry_tuples = []
virtual_entry_tuples = []
cb_tuple = {}
timeline_tuple = {}
config = configparser.ConfigParser()
def __init__(self, config_fn=None): # prefixes should be
if config_fn is not None:
try:
self.config.read(config_fn)
except Exception as e:
logging.exception("Error: Unable to open configuration file: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'prefixes'):
self.prefix_fn = self.config.get('Prefixes', 'prefixes')
# prefix_file = open(self.prefix_fn,"r")
# self.prefixes = prefix_file.readlines()
prefix_file = pd.read_csv(self.prefix_fn, dtype=object)
try:
for row in prefix_file.itertuples():
self.prefixes[row.prefix] = row.url
except Exception as e:
logging.exception("Error: Something went wrong when trying to read the Prefix File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'base_uri'):
self.kb = self.config.get('Prefixes', 'base_uri')
if self.config.has_option('Source Files', 'dictionary'):
dm_fn = self.config.get('Source Files', 'dictionary')
try:
dm_file = pd.read_csv(dm_fn, dtype=object)
try: # Populate virtual and explicit entry lists
for row in dm_file.itertuples():
if pd.isnull(row.Column):
logging.exception("Error: The SDD must have a column named 'Column'")
sys.exit(1)
if row.Column.startswith("??"):
self.virtual_entry_list.append(row)
else:
self.explicit_entry_list.append(row)
except Exception as e:
logging.exception(
"Error: Something went wrong when trying to read the Dictionary Mapping File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
except Exception as e:
logging.exception("Error: The specified Dictionary Mapping file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'codebook'):
self.cb_fn = self.config.get('Source Files', 'codebook')
if self.cb_fn is not None:
try:
cb_file = pd.read_csv(self.cb_fn, dtype=object)
try:
inner_tuple_list = []
for row in cb_file.itertuples():
if (pd.notnull(row.Column) and row.Column not in self.cb_tuple):
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Code"] = row.Code
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Class):
inner_tuple["Class"] = row.Class
if "Resource" in row and pd.notnull(row.Resource):
inner_tuple["Resource"] = row.Resource
inner_tuple_list.append(inner_tuple)
self.cb_tuple[row.Column] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Codebook file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Codebook file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'timeline'):
self.timeline_fn = self.config.get('Source Files', 'timeline')
if self.timeline_fn is not None:
try:
timeline_file = pd.read_csv(self.timeline_fn, dtype=object)
try:
inner_tuple_list = []
for row in timeline_file.itertuples():
if pd.notnull(row.Name) and row.Name not in self.timeline_tuple:
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Type"] = row.Type
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Start):
inner_tuple["Start"] = row.Start
if pd.notnull(row.End):
inner_tuple["End"] = row.End
if pd.notnull(row.Unit):
inner_tuple["Unit"] = row.Unit
if pd.notnull(row.inRelationTo):
inner_tuple["inRelationTo"] = row.inRelationTo
inner_tuple_list.append(inner_tuple)
self.timeline_tuple[row.Name] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Timeline file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Timeline file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'code_mappings'):
cmap_fn = self.config.get('Source Files', 'code_mappings')
code_mappings_reader = pd.read_csv(cmap_fn)
for code_row in code_mappings_reader.itertuples():
if pd.notnull(code_row.code):
self.unit_code_list.append(code_row.code)
if pd.notnull(code_row.uri):
self.unit_uri_list.append(code_row.uri)
if pd.notnull(code_row.label):
self.unit_label_list.append(code_row.label)
if self.config.has_option('Source Files', 'data_file'):
self.data_fn = self.config.get('Source Files', 'data_file')
def getInputClass(self):
return whyis.SemanticDataDictionary
def getOutputClass(self):
return whyis.SemanticDataDictionaryInterpretation
def get_query(self):
return '''SELECT ?s WHERE { ?s ?p ?o .} LIMIT 1\n'''
def process(self, i, o):
print("Processing SDD...")
self.app.db.store.nsBindings = {}
npub = Nanopublication(store=o.graph.store)
# prefixes={}
# prefixes.update(self.prefixes)
# prefixes.update(self.app.NS.prefixes)
self.writeVirtualEntryNano(npub)
self.writeExplicitEntryNano(npub)
self.interpretData(npub)
def parseString(self, input_string, delim):
my_list = input_string.split(delim)
my_list = [element.strip() for element in my_list]
return my_list
def rdflibConverter(self, input_word):
if "http" in input_word:
return rdflib.term.URIRef(input_word)
if ':' in input_word:
word_list = input_word.split(":")
term = self.prefixes[word_list[0]] + word_list[1]
return rdflib.term.URIRef(term)
return rdflib.Literal(input_word, datatype=rdflib.XSD.string)
def codeMapper(self, input_word):
unitVal = input_word
for unit_label in self.unit_label_list:
if unit_label == input_word:
unit_index = self.unit_label_list.index(unit_label)
unitVal = self.unit_uri_list[unit_index]
for unit_code in self.unit_code_list:
if unit_code == input_word:
unit_index = self.unit_code_list.index(unit_code)
unitVal = self.unit_uri_list[unit_index]
return unitVal
def convertVirtualToKGEntry(self, *args):
if args[0][:2] == "??":
if self.studyRef is not None:
if args[0] == self.studyRef:
return self.prefixes[self.kb] + args[0][2:]
if len(args) == 2:
return self.prefixes[self.kb] + args[0][2:] + "-" + args[1]
return self.prefixes[self.kb] + args[0][2:]
if ':' not in args[0]:
# Check for entry in column list
for item in self.explicit_entry_list:
if args[0] == item.Column:
if len(args) == 2:
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(",
"").replace(
")", "").replace("/", "-").replace("\\", "-") + "-" + args[1]
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace(
")", "").replace("/", "-").replace("\\", "-")
return '"' + args[0] + "\"^^xsd:string"
return args[0]
def checkVirtual(self, input_word):
try:
if input_word[:2] == "??":
return True
return False
except Exception as e:
logging.exception("Something went wrong in Interpreter.checkVirtual(): ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
def isfloat(self, value):
try:
float(value)
return True
except ValueError:
return False
def writeVirtualEntryNano(self, nanopub):
for item in self.virtual_entry_list:
virtual_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(item.Column[2:]))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
nanopub.assertion.add(
(term, rdflib.RDFS.label, rdflib.Literal(str(item.Column[2:]), datatype=rdflib.XSD.string)))
# Set the rdf:type of the virtual row to either the Attribute or Entity value (or else owl:Individual)
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Entity"] = self.codeMapper(item.Entity)
if virtual_tuple["Entity"] == "hasco:Study":
self.studyRef = item.Column
virtual_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Attribute"] = self.codeMapper(item.Attribute)
else:
logging.warning(
"Warning: Virtual entry not assigned an Entity or Attribute value, or was assigned both.")
virtual_tuple["Column"] = item.Column
# If there is a value in the inRelationTo column ...
if pd.notnull(item.inRelationTo):
virtual_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
virtual_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
virtual_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
virtual_tuple["Relation"] = item.Relation
virtual_tuple["Role"] = item.Role
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if pd.notnull(item.wasDerivedFrom):
if ',' in item.wasDerivedFrom:
derivedFromTerms = self.parseString(item.wasDerivedFrom, ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm))))
else:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom))))
virtual_tuple["wasDerivedFrom"] = item.wasDerivedFrom
if pd.notnull(item.wasGeneratedBy):
if ',' in item.wasGeneratedBy:
generatedByTerms = self.parseString(item.wasGeneratedBy, ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm))))
else:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy))))
virtual_tuple["wasGeneratedBy"] = item.wasGeneratedBy
self.virtual_entry_tuples.append(virtual_tuple)
if self.timeline_fn is not None:
for key in self.timeline_tuple:
tl_term = self.rdflibConverter(self.convertVirtualToKGEntry(key))
nanopub.assertion.add((tl_term, rdflib.RDF.type, rdflib.OWL.Class))
for timeEntry in self.timeline_tuple[key]:
if 'Type' in timeEntry:
nanopub.assertion.add(
(tl_term, rdflib.RDFS.subClassOf, self.rdflibConverter(timeEntry['Type'])))
if 'Label' in timeEntry:
nanopub.assertion.add((tl_term, rdflib.RDFS.label,
rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string)))
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
nanopub.assertion.add((tl_term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
if 'Start' in timeEntry:
start_time = rdflib.BNode()
nanopub.assertion.add((start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
nanopub.assertion.add((tl_term, sio.hasStartTime, start_time))
if 'End' in timeEntry:
end_time = rdflib.BNode()
nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End']))))
nanopub.assertion.add((tl_term, sio.hasEndTime, end_time))
if 'Unit' in timeEntry:
nanopub.assertion.add(
(tl_term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit']))))
if 'inRelationTo' in timeEntry:
nanopub.assertion.add((tl_term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(timeEntry['inRelationTo']))))
nanopub.provenance.add((tl_term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
def writeExplicitEntryNano(self, nanopub):
for item in self.explicit_entry_list:
explicit_entry_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(
item.Column.replace(" ", "_").replace(",", "").replace("(", "").replace(")", "").replace("/",
"-").replace(
"\\", "-")))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
if pd.notnull(item.Attribute):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper(item.Attribute)
elif pd.notnull(item.Entity):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Entity"] = self.codeMapper(item.Entity)
else:
nanopub.assertion.add((term, rdflib.RDFS.subClassOf, sio.Attribute))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper("sio:Attribute")
logging.warning("Warning: Explicit entry not assigned an Attribute or Entity value.")
if pd.notnull(item.attributeOf):
nanopub.assertion.add(
(term, sio.isAttributeOf, self.rdflibConverter(self.convertVirtualToKGEntry(item.attributeOf))))
explicit_entry_tuple["isAttributeOf"] = self.convertVirtualToKGEntry(item.attributeOf)
else:
logging.warning("Warning: Explicit entry not assigned an isAttributeOf value.")
if pd.notnull(item.Unit):
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.convertVirtualToKGEntry(self.codeMapper(item.Unit)))))
explicit_entry_tuple["Unit"] = self.convertVirtualToKGEntry(self.codeMapper(item.Unit))
if pd.notnull(item.Time):
nanopub.assertion.add(
(term, sio.existsAt, self.rdflibConverter(self.convertVirtualToKGEntry(item.Time))))
explicit_entry_tuple["Time"] = item.Time
if pd.notnull(item.inRelationTo):
explicit_entry_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
explicit_entry_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (
|
pd.notnull(item.Role)
|
pandas.notnull
|
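A minimal, hypothetical sketch of pandas.notnull, the API this record targets; the frame and column names below are invented for illustration and only show how notnull/isnull guard optional spreadsheet cells while iterating rows.
import pandas as pd

rows = pd.DataFrame({"Relation": ["sio:hasPart", None], "Role": [None, "sio:SubjectRole"]})
for item in rows.itertuples():
    # pd.notnull returns False for None/NaN, so each branch only fires for filled cells
    if pd.notnull(item.Relation) and pd.isnull(item.Role):
        print("relation only:", item.Relation)
    elif pd.isnull(item.Relation) and pd.notnull(item.Role):
        print("role only:", item.Role)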
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import irradiance
from pvlib import atmosphere
from conftest import (requires_ephem, requires_numba, needs_numpy_1_10,
pandas_0_22)
# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)
# must include night values
times = pd.date_range(start='20140624', freq='6H', periods=4, tz=tus.tz)
ephem_data = solarposition.get_solarposition(
times, tus.latitude, tus.longitude, method='nrel_numpy')
irrad_data = tus.get_clearsky(times, model='ineichen', linke_turbidity=3)
dni_et = irradiance.extraradiation(times.dayofyear)
ghi = irrad_data['ghi']
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('input, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', requires_ephem('pyephem')])
def test_extraradiation(input, expected, method):
out = irradiance.extraradiation(input)
assert_allclose(out, expected, atol=1)
@requires_numba
def test_extraradiation_nrel_numba():
result = irradiance.extraradiation(times, method='nrel', how='numba', numthreads=8)
assert_allclose(result, [1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_extraradiation_epoch_year():
out = irradiance.extraradiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
def test_extraradiation_invalid():
with pytest.raises(ValueError):
irradiance.extraradiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.grounddiffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series():
ground_irrad = irradiance.grounddiffuse(40, ghi)
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0():
ground_irrad = irradiance.grounddiffuse(40, ghi, albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface():
with pytest.raises(KeyError):
irradiance.grounddiffuse(40, ghi, surface_type='invalid')
def test_grounddiffuse_albedo_surface():
result = irradiance.grounddiffuse(40, ghi, surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series():
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
result = irradiance.klucher(40, 180, 100, 900, 20, 180)
assert_allclose(result, 88.3022221559)
def test_klucher_series():
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 37.446276, 109.209347, 56.965916], atol=1e-4)
def test_haydavies():
result = irradiance.haydavies(40, 180, irrad_data['dhi'], irrad_data['dni'],
dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 14.967008, 102.994862, 33.190865], atol=1e-4)
def test_reindl():
result = irradiance.reindl(40, 180, irrad_data['dhi'], irrad_data['dni'],
irrad_data['ghi'], dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [np.nan, 15.730664, 104.131724, 34.166258], atol=1e-4)
def test_king():
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out, df_components = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am, return_components=True)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
expected_components = pd.DataFrame(
np.array([[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['isotropic', 'circumsolar', 'horizon'],
index=times
)
if pandas_0_22():
expected_for_sum = expected.copy()
expected_for_sum.iloc[2] = 0
else:
expected_for_sum = expected
sum_components = df_components.sum(axis=1)
assert_series_equal(out, expected, check_less_precise=2)
assert_frame_equal(df_components, expected_components)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
@needs_numpy_1_10
def test_perez_arrays():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values, am.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
def test_liujordan():
expected = pd.DataFrame(np.
array([[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
# klutcher (misspelling) will be removed in 0.3
def test_total_irrad():
models = ['isotropic', 'klutcher', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
AM = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
for model in models:
total = irradiance.total_irrad(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=AM,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_total_irrad_scalars(model):
total = irradiance.total_irrad(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_globalinplane():
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
airmass = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
gr_sand = irradiance.grounddiffuse(40, ghi, surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], airmass)
irradiance.globalinplane(
aoi=aoi, dni=irrad_data['dni'], poa_sky_diffuse=diff_perez,
poa_ground_diffuse=gr_sand)
def test_disc_keys():
clearsky_data = tus.get_clearsky(times, model='ineichen',
linke_turbidity=3)
disc_data = irradiance.disc(clearsky_data['ghi'], ephem_data['zenith'],
ephem_data.index)
assert 'dni' in disc_data.columns
assert 'kt' in disc_data.columns
assert 'airmass' in disc_data.columns
def test_disc_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith =
|
pd.Series([10.567, 72.469], index=times)
|
pandas.Series
|
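A small, self-contained sketch of pandas.Series, the API this record targets, building a time-indexed series like the zenith fixture above; the timezone choice here is an assumption made only for illustration.
import pandas as pd

times = pd.date_range('2014-06-24 12:00', periods=2, freq='6H', tz='Etc/GMT+7')  # assumed tz
zenith = pd.Series([10.567, 72.469], index=times)  # values indexed by timestamps
print(zenith)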
"""Simulate 2D tracks with various motion types
"""
import numpy as np
import pandas as pd
from .utils import IdDefaults
coords = IdDefaults.xy
trackid = IdDefaults.track
frameid = IdDefaults.frame
def _brownian_xy(n, diffusion=1, xs_rng=(0, 100), ys_rng=(0, 100), frame_interval=1):
x_off = np.random.rand() * (xs_rng[1] - xs_rng[0]) + xs_rng[0]
    y_off = np.random.rand() * (ys_rng[1] - ys_rng[0]) + ys_rng[0]
pos_xy = np.random.randn(n, 2) * np.sqrt(2 * diffusion * frame_interval)
pos_xy = np.cumsum(pos_xy, axis=0) + [y_off, x_off]
return pos_xy
def brownian(
n_tracks=20,
min_time=0,
max_time=42,
diffusion=1,
xs_rng=(0, 100),
ys_rng=(0, 100),
frame_interval=1,
):
"""Simmulates brownian motion by Gaussian random walk.
Args:
n_tracks (int, optional): Defaults to 20.
min_time (int, optional): Defaults to 0.
max_time (int, optional): Defaults to 42.
diffusion (int, optional): Defaults to 1.
xs_rng (tuple, optional): Defaults to (0, 100).
ys_rng (tuple, optional): Defaults to (0, 100).
frame_interval (int, optional): Defaults to 1.
Returns:
pandas.DataFrame: tracks
"""
res = []
for t_id in range(n_tracks):
frames = np.arange(min_time, max_time + 1, dtype=np.int32) * frame_interval
track_ids = np.ones(len(frames), dtype=np.int32) * t_id
pos_xy = _brownian_xy(
len(frames),
diffusion=diffusion,
xs_rng=xs_rng,
ys_rng=ys_rng,
frame_interval=frame_interval,
)
df = pd.DataFrame(pos_xy, columns=coords)
df.insert(0, frameid, frames)
df.insert(0, trackid, track_ids)
res.append(df)
brownian_track = pd.concat(res, axis=0).reset_index(drop=True)
return brownian_track
def _linear_xy(
n, velocity=1, xs_rng=(0, 100), ys_rng=(0, 100),
):
x_off = np.random.rand() * (xs_rng[1] - xs_rng[0]) + xs_rng[0]
    y_off = np.random.rand() * (ys_rng[1] - ys_rng[0]) + ys_rng[0]
xy_direction = np.random.randn(1, 2)
xy_direction = xy_direction / np.linalg.norm(xy_direction) * velocity
pos_xy = np.tile(xy_direction, (n, 1))
pos_xy = np.cumsum(pos_xy, axis=0) + [y_off, x_off]
return pos_xy
def linear(
n_tracks=20,
min_time=0,
max_time=42,
velocity=1,
xs_rng=(0, 100),
ys_rng=(0, 100),
frame_interval=1,
):
"""Simulate linear motion
Args:
n_tracks (int, optional): Defaults to 20.
min_time (int, optional): Defaults to 0.
max_time (int, optional): Defaults to 42.
velocity (int, optional): Defaults to 1.
xs_rng (tuple, optional): Defaults to (0, 100).
ys_rng (tuple, optional): Defaults to (0, 100).
frame_interval (int, optional): Defaults to 1.
Returns:
pandas.DataFrame: tracks
"""
res = []
for t_id in range(n_tracks):
frames = np.arange(min_time, max_time + 1, dtype=np.int32) * frame_interval
track_ids = np.ones(len(frames), dtype=np.int32) * t_id
xy_direction = np.random.randn(1, 2)
xy_direction = xy_direction / np.linalg.norm(xy_direction) * velocity
pos_xy = _linear_xy(
len(frames), velocity=velocity, xs_rng=xs_rng, ys_rng=ys_rng
)
df = pd.DataFrame(pos_xy, columns=coords)
df.insert(0, frameid, frames)
df.insert(0, trackid, track_ids)
res.append(df)
brownian_track =
|
pd.concat(res, axis=0)
|
pandas.concat
|
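A brief sketch of pandas.concat, the API this record targets: per-track frames are stacked row-wise and the index is reset, mirroring how the simulator assembles its tracks; the toy columns are invented.
import numpy as np
import pandas as pd

parts = [pd.DataFrame({"track_id": i, "x": np.random.rand(3), "y": np.random.rand(3)}) for i in range(2)]
tracks = pd.concat(parts, axis=0).reset_index(drop=True)  # one frame, rows 0..5
print(tracks.shape)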
from copy import deepcopy
from typing import List
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import AddConstTransform
from etna.transforms import FilterFeaturesTransform
from etna.transforms import LagTransform
from etna.transforms import MaxAbsScalerTransform
from etna.transforms import OneHotEncoderTransform
from etna.transforms import SegmentEncoderTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
classic_df_exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
classic_df_exog.rename(columns={"target": "exog"}, inplace=True)
df_exog = TSDataset.to_dataset(classic_df_exog)
ts = TSDataset(df=df, df_exog=df_exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame, List[str]]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 =
|
pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
|
pandas.DataFrame
|
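A minimal sketch of the pandas.DataFrame constructor targeted by this record: scalar values broadcast against the datetime column, which is how the regressor fixture above is built.
import pandas as pd

timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
print(df_2.head())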
import warnings
warnings.simplefilter("ignore")
import json
import requests
import os
import geopandas as gpd
from osgeo import gdal
import pandas as pd
import rasterio
from rasterio import merge as riomerge
from pyproj import Transformer
import urllib
from shapely.geometry import Point
downloadfolder = f"{os.getcwd()}/"
outputfolder = os.path.join(downloadfolder, "output/")
downloadfolder_DSM = os.path.join(downloadfolder, "DSM_tif/")
downloadfolder_DTM = os.path.join(downloadfolder, "DTM_tif/")
be_shp_path = os.path.join(downloadfolder, "Kaartbladversnijdingen_NGI_numerieke_reeks_Shapefile/Shapefile/Kbl.shp")
BpnCapa_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnCapa.shp")
BpnCapa_1_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnCapa_1.shp")
BpnRebu_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnRebu.shp")
BpnRebu_1_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnRebu_1.shp")
BpnCabu_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnCabu.shp")
basefiles_missing = False
if os.path.exists(be_shp_path) == False:
basefiles_missing = True
if os.path.exists(BpnCapa_path) == False:
basefiles_missing = True
if os.path.exists(BpnCapa_1_path) == False:
basefiles_missing = True
if os.path.exists(BpnRebu_path) == False:
basefiles_missing = True
if os.path.exists(BpnRebu_1_path) == False:
basefiles_missing = True
if os.path.exists(BpnCabu_path) == False:
basefiles_missing = True
if basefiles_missing:
print("Cannot run the program, download all needed files first.")
print("Readme has info on what files to download from government.")
quit()
cant_continue = True
while cant_continue:
    my_adress = input("Enter an address: ")
try:
expandbox = int(input("Enter number of meters to be added (100m-1000m, default=400m): "))
except ValueError:
expandbox = 400
if expandbox > 1000:
expandbox = 1000
if expandbox < 100:
expandbox = 100
url = "https://loc.geopunt.be/v4/Location?q=" + my_adress
r = requests.get(url)
try:
r_json = json.loads(r.text)["LocationResult"][0]
except IndexError:
print("that adress is not recognized...")
continue
bbox = r_json.get('BoundingBox', {})
lowerleft_x = bbox["LowerLeft"]["X_Lambert72"]
lowerleft_y = bbox["LowerLeft"]["Y_Lambert72"]
upperright_x = bbox["UpperRight"]["X_Lambert72"]
upperright_y = bbox["UpperRight"]["Y_Lambert72"]
print(f"Total size is {upperright_x - lowerleft_x + 2*expandbox}m, by {upperright_y - lowerleft_y + 2*expandbox}m")
if ((upperright_x - lowerleft_x + expandbox) < 1501) or ((upperright_y - lowerleft_y + expandbox) < 1501):
cant_continue = False
else:
print("That area is too large... Try again")
x_offset = 0
y_offset = 0
if len(json.loads(r.text)["LocationResult"]) == 1:
r_json = json.loads(r.text)["LocationResult"][0]
bbox = r_json.get('BoundingBox', {})
lowerleft_x = bbox["LowerLeft"]["X_Lambert72"] + x_offset
lowerleft_y = bbox["LowerLeft"]["Y_Lambert72"] + y_offset
upperright_x = bbox["UpperRight"]["X_Lambert72"] + x_offset
upperright_y = bbox["UpperRight"]["Y_Lambert72"] + y_offset
else:
print("Addres not found, please check for typos etc...")
# Check in what NGI map the adress coordinates are located
be_shp = gpd.read_file(be_shp_path)
lowerleft = Point(lowerleft_x - expandbox, lowerleft_y - expandbox)
upperleft = Point(lowerleft_x - expandbox, upperright_y + expandbox)
lowerright = Point(upperright_x + expandbox, lowerleft_y - expandbox)
upperright = Point(upperright_x + expandbox, upperright_y + expandbox)
lowerleft_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: lowerleft.within(x)) == True]["CODE"].tolist()
upperleft_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: upperleft.within(x)) == True]["CODE"].tolist()
lowerright_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: lowerright.within(x)) == True]["CODE"].tolist()
upperright_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: upperright.within(x)) == True]["CODE"].tolist()
if len(lowerleft_lst) == 1 and len(upperleft_lst) == 1 and len(lowerright_lst) == 1 and len(upperright_lst) == 1:
print("Geometry points all within unique NGI maps --> OK")
else:
print("Geometry points NGI map error, cannot process this location (flemish gov NGI map seems incorrect)")
print("Trying to continue anyway...")
mapnumbers = list(dict.fromkeys((upperleft_lst[0], upperright_lst[0], lowerleft_lst[0], lowerright_lst[0])))
if len(mapnumbers) == 1:
print(f"All bounding box points are in the same Ngi map with Nr: {lowerleft_lst[0]}")
else:
print("The property is ovelapping multiple Ngi maps:")
print("maps top: ", upperleft_lst[0], upperright_lst[0])
print("maps bottom: ", lowerleft_lst[0], lowerright_lst[0])
print("creating Tiff coutouts...")
def get_dsmdtm_path(dsmdtm, thismap) -> str:
dsmdtm = dsmdtm.upper()
myfile = f"DHMVII{dsmdtm}RAS1m_k{thismap.zfill(2)}/GeoTIFF/DHMVII{dsmdtm}RAS1m_k{thismap.zfill(2)}.tif"
myfilefullpath = f"{downloadfolder}{dsmdtm}_tif/{myfile}"
if os.path.exists(myfilefullpath) == False:
print("Cannot find the tif file you requested, missing file is:")
print(myfilefullpath)
quit()
else:
return myfile
def create_tif_cutouts(thismap):
geotif_DSM_file = os.path.join(downloadfolder_DSM, get_dsmdtm_path("DSM", thismap))
resized_DSM_geotif = os.path.join(outputfolder, f"output_DSM{thismap}.tif")
geotif_DTM_file = os.path.join(downloadfolder_DTM, get_dsmdtm_path("DTM", thismap))
resized_DTM_geotif = os.path.join(outputfolder, f"output_DTM{thismap}.tif")
gdal.Translate(resized_DSM_geotif, geotif_DSM_file, projWin=[lowerleft_x - expandbox, upperright_y + expandbox, upperright_x + expandbox, lowerleft_y - expandbox])
crop_white_border(resized_DSM_geotif)
gdal.Translate(resized_DTM_geotif, geotif_DTM_file, projWin=[lowerleft_x - expandbox, upperright_y + expandbox, upperright_x + expandbox, lowerleft_y - expandbox])
crop_white_border(resized_DTM_geotif)
# crop the image borders if they have white values
def crop_white_border(my_geotif_file):
with rasterio.open(my_geotif_file) as src:
window = rasterio.windows.get_data_window(src.read(1, masked=True))
# window = Window(col_off=13, row_off=3, width=757, height=711)
kwargs = src.meta.copy()
kwargs.update({
'height': window.height,
'width': window.width,
'transform': rasterio.windows.transform(window, src.transform)})
with rasterio.open(my_geotif_file, 'w', **kwargs) as dst:
dst.write(src.read(window=window))
def createfinal(dsmdtm, mylist):
with rasterio.open(mylist[0]) as src:
meta = src.meta.copy()
# The merge function returns a single array and the affine transform info
arr, out_trans = riomerge.merge(mylist)
meta.update({
"driver": "GTiff",
"height": arr.shape[1],
"width": arr.shape[2],
"transform": out_trans
})
# Write the mosaic raster to disk
with rasterio.open(os.path.join(outputfolder, f"output_{dsmdtm}.tif"), "w", **meta) as dest:
dest.write(arr)
dsm_list = []
dtm_list = []
for thismap in mapnumbers:
create_tif_cutouts(thismap)
dsm_list.append(os.path.join(outputfolder, f"output_DSM{thismap}.tif"))
dtm_list.append(os.path.join(outputfolder, f"output_DTM{thismap}.tif"))
createfinal("DSM", dsm_list)
createfinal("DTM", dtm_list)
print("creating xyz data of the surroundings for blender...")
# create xyz dataframes
resized_DSM_geotif = os.path.join(outputfolder, "output_DSM.tif")
xyz_DSM_file = os.path.join(outputfolder, "output_DSM.xyz")
resized_DTM_geotif = os.path.join(outputfolder, "output_DTM.tif")
xyz_DTM_file = os.path.join(outputfolder, "output_DTM.xyz")
geo_DSM_resized = gdal.Open(resized_DSM_geotif)
gdal.Translate(xyz_DSM_file, geo_DSM_resized)
df_dsm = pd.read_csv(xyz_DSM_file, sep=" ", header=None)
df_dsm.columns = ["x", "y", "z"]
geo_DTM_resized = gdal.Open(resized_DTM_geotif)
gdal.Translate(xyz_DTM_file, geo_DTM_resized)
df_dtm = pd.read_csv(xyz_DTM_file, sep=" ", header=None)
df_dtm.columns = ["x", "y", "z"]
df_final = pd.concat([df_dsm, df_dtm]).groupby(["x", "y"], as_index=False)["z"].sum()
final_csv = os.path.join(outputfolder, "final.csv")
df_blender = df_final.copy()
df_blender.columns = ["x", "y", "z"]
df_blender.reset_index(drop=True, inplace=True)
df_blender["z"] = df_blender['z'].where(df_blender['z'] > -1000, other=0)
x_med_blender = df_blender["x"].median()
y_med_blender = df_blender["y"].median()
z_min_blender = df_blender["z"].min()
df_blender["x"] = df_blender["x"] - x_med_blender
df_blender["y"] = df_blender["y"] - y_med_blender
df_blender["z"] = df_blender["z"] - z_min_blender
df_blender.to_csv(final_csv, sep=',', index=False)
print("Fetching sattelite images from MapBox...")
def sat_img_from_mapbox(mapbox_apikey):
transformer = Transformer.from_crs(crs_from=31370, crs_to=4326)
mypoint = (lowerleft_x - expandbox, lowerleft_y - expandbox)
long1, latt1 = transformer.transform(mypoint[0], mypoint[1])
mypoint2 = (lowerleft_x + expandbox, lowerleft_y + expandbox)
long2, latt2 = transformer.transform(mypoint2[0], mypoint2[1])
baselink = f"https://api.mapbox.com/styles/v1/mapbox/satellite-v9/static/[{latt1},{long1},{latt2},{long2}]/1280x1280?access_token={mapbox_apikey}"
urllib.request.urlretrieve(baselink, f'{outputfolder}texture.jpeg')
def sat_img_plane_from_mapbox(mapbox_apikey, box):
transformer = Transformer.from_crs(crs_from=31370, crs_to=4326)
mypoint = (lowerleft_x - box, lowerleft_y - box)
long1, latt1 = transformer.transform(mypoint[0], mypoint[1])
mypoint2 = (lowerleft_x + box, lowerleft_y + box)
long2, latt2 = transformer.transform(mypoint2[0], mypoint2[1])
baselink = f"https://api.mapbox.com/styles/v1/mapbox/satellite-v9/static/[{latt1},{long1},{latt2},{long2}]/1280x1280?access_token={mapbox_apikey}"
urllib.request.urlretrieve(baselink, f'{outputfolder}texture_plane_{box}.jpeg')
mapbox_apikey_path = os.path.join(downloadfolder, "mapbox_api_key")
if os.path.exists(mapbox_apikey_path):
with open(mapbox_apikey_path) as f:
mapbox_apikey = f.readline()
sat_img_from_mapbox(mapbox_apikey)
sat_img_plane_from_mapbox(mapbox_apikey, 10000)
sat_img_plane_from_mapbox(mapbox_apikey, 2000)
sat_img_plane_from_mapbox(mapbox_apikey, 150000)
else:
mapbox_apikey = input("paste your mapbox api key to continue")
with open(mapbox_apikey_path, 'w') as f:
f.write(mapbox_apikey)
sat_img_from_mapbox(mapbox_apikey)
sat_img_plane_from_mapbox(mapbox_apikey, 10000)
sat_img_plane_from_mapbox(mapbox_apikey, 2000)
sat_img_plane_from_mapbox(mapbox_apikey, 150000)
print("Cycling trough shapefiles to get house area data...")
plot_df = gpd.read_file(BpnCapa_path, bbox=(lowerleft_x, upperright_y, upperright_x, lowerleft_y))
if plot_df["geometry"].count() == 0:
plot_df = gpd.read_file(BpnCapa_1_path, bbox=(lowerleft_x, upperright_y, upperright_x, lowerleft_y))
if plot_df["geometry"].count() == 1:
plotarea = plot_df.iloc[0]["OPPERVL"]
building_df = gpd.read_file(BpnRebu_path, mask=plot_df["geometry"][0])
if building_df["geometry"].count() == 0:
building_df = gpd.read_file(BpnRebu_1_path, mask=plot_df["geometry"][0])
if building_df["geometry"].count() == 0:
building_df = gpd.read_file(BpnCabu_path, mask=plot_df["geometry"][0])
building_df = gpd.overlay(plot_df, building_df, how='intersection', keep_geom_type=None, make_valid=True)
else:
building_df = None
buildingarea = 0
if building_df is not None:
for i in range(0, building_df["geometry"].count()):
buildingarea += building_df.iloc[i]["OPPERVL_2"]
print("Creating xyz data for house and plot in Blender...")
offsetbox = 2
plot_minx = plot_df["geometry"][0].bounds[0] - offsetbox
plot_miny = plot_df["geometry"][0].bounds[1] - offsetbox
plot_maxx = plot_df["geometry"][0].bounds[2] + offsetbox
plot_maxy = plot_df["geometry"][0].bounds[3] + offsetbox
dsm_plot = os.path.join(outputfolder, "output_plot_DSM.tif")
dtm_plot = os.path.join(outputfolder, "output_plot_DTM.tif")
dsm_plot_df = gdal.Translate(dsm_plot, resized_DSM_geotif, projWin=[plot_minx, plot_maxy, plot_maxx, plot_miny])
gdal.Translate(dsm_plot, resized_DSM_geotif, projWin=[plot_minx, plot_maxy, plot_maxx, plot_miny])
dtm_plot_df = gdal.Translate(dtm_plot, resized_DTM_geotif, projWin=[plot_minx, plot_maxy, plot_maxx, plot_miny])
gdal.Translate(dtm_plot, resized_DTM_geotif, projWin=[plot_minx, plot_maxy, plot_maxx, plot_miny])
xyz_DSM_plot_file = os.path.join(outputfolder, "output_plot_DSM.xyz")
xyz_DTM_plot_file = os.path.join(outputfolder, "output_plot_DTM.xyz")
dsm_plot_tif = gdal.Open(dsm_plot)
gdal.Translate(xyz_DSM_plot_file, dsm_plot_tif)
df_plot_dsm = pd.read_csv(xyz_DSM_plot_file, sep=" ", header=None)
df_plot_dsm.columns = ["x", "y", "z"]
dtm_plot_tif = gdal.Open(dtm_plot)
gdal.Translate(xyz_DTM_plot_file, dtm_plot_tif)
df_plot_dtm =
|
pd.read_csv(xyz_DTM_plot_file, sep=" ", header=None)
|
pandas.read_csv
|
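A self-contained sketch of pandas.read_csv as used above for the gdal-generated .xyz dumps (space-separated, no header); the inline sample data is invented so the snippet runs without the raster files.
import io
import pandas as pd

sample = "100.0 200.0 12.5\n101.0 200.0 12.7\n"
df_xyz = pd.read_csv(io.StringIO(sample), sep=" ", header=None, names=["x", "y", "z"])
print(df_xyz)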
#!/bin/bash
# See: https://github.com/pr3d4t0r/COVIDvu/blob/master/LICENSE
# vim: set fileencoding=utf-8:
from os.path import join
from pandas.core.indexes.datetimes import DatetimeIndex
from covidvu.pipeline.vujson import JH_CSSE_FILE_CONFIRMED
from covidvu.pipeline.vujson import JH_CSSE_FILE_CONFIRMED_DEPRECATED
from covidvu.pipeline.vujson import JH_CSSE_FILE_DEATHS
from covidvu.pipeline.vujson import JH_CSSE_FILE_DEATHS_DEPRECATED
from covidvu.pipeline.vujson import JH_CSSE_REPORT_PATH
from covidvu.pipeline.vujson import SITE_DATA
from covidvu.pipeline.vujson import dumpJSON
from covidvu.pipeline.vujson import parseCSSE
import sys
import re
import os
import json
import numpy as np
import pandas as pd
import pystan
from pystan.model import StanModel
N_SAMPLES = 3000
N_CHAINS = 3
N_DAYS_PREDICT = 14
MIN_CASES_FILTER = 50
MIN_NUMBER_DAYS_WITH_CASES = 10
MAX_TREEDEPTH = 12
PRIOR_LOG_CARRYING_CAPACITY = (0, 10)
PRIOR_MID_POINT = (0, 1000)
PRIOR_GROWTH_RATE = (0.5, 0.5)
PRIOR_SIGMA = (0, 10)
PREDICTIONS_PERCENTILES = (
(2.5, 97.5),
(25, 75),
)
PREDICTION_MEAN_JSON_FILENAME_WORLD = 'prediction-world-mean-%s.json'
PREDICTION_CI_JSON_FILENAME_WORLD = 'prediction-world-conf-int-%s.json'
PREDICTION_MEAN_JSON_FILENAME_US = 'prediction-US-mean-%s.json'
PREDICTION_CI_JSON_FILENAME_US = 'prediction-US-conf-int-%s.json'
def _getCountryToTrain(regionTrainIndex, confirmedCases):
topCountries = confirmedCases.iloc[-1, confirmedCases.columns.map(lambda c: c[0] != '!')]
topCountries = topCountries.sort_values(ascending=False)
regionName = topCountries.index[regionTrainIndex]
return regionName
def buildLogisticModel(priorLogCarryingCapacity = PRIOR_LOG_CARRYING_CAPACITY,
priorMidPoint = PRIOR_MID_POINT,
priorGrowthRate = PRIOR_GROWTH_RATE,
priorSigma = PRIOR_SIGMA,
nDaysName='nDays',
timeName='t',
casesLogName='casesLog',
) -> StanModel:
logisticGrowthModel = f'''
data {{
int<lower=0> {nDaysName};
vector[{nDaysName}] {timeName};
vector[{nDaysName}] {casesLogName};
}}
parameters {{
real logCarryingCapacity;
real midPoint;
real growthRate;
real<lower=0> sigma;
}}
transformed parameters {{
real carryingCap;
vector[{nDaysName}] casesLin;
carryingCap = pow(10, logCarryingCapacity);
casesLin = carryingCap * inv_logit(growthRate * ({timeName} - midPoint));
}}
model {{
logCarryingCapacity ~ uniform({priorLogCarryingCapacity[0]},{priorLogCarryingCapacity[1]});
midPoint ~ normal({priorMidPoint[0]}, {priorMidPoint[1]}) T[0,];
growthRate ~ normal({priorGrowthRate[0]},{priorGrowthRate[1]});
sigma ~ normal({priorSigma[0]},{priorSigma[1]}) T[0,];
{casesLogName} ~ normal(log(casesLin + 1), sigma);
}}
'''
logGrowthModel = pystan.StanModel(model_code=logisticGrowthModel)
return logGrowthModel
def _getPredictionsFromPosteriorSamples(t,
trace,
nDaysPredict,
predictionsPercentiles,
):
tPredict = np.arange(len(t) + nDaysPredict)
predictions = np.zeros((len(t)+nDaysPredict, trace.shape[0]))
for i in range(trace.shape[0]):
carryingCap = 10 ** trace['logCarryingCapacity'].iloc[i]
predictions[:, i] = carryingCap / (
1 + np.exp(-1.0 * trace['growthRate'].iloc[i] * (tPredict - trace['midPoint'].iloc[i])))
predictionsPercentilesTS = []
for qLow, qHigh in predictionsPercentiles:
predictionsLow = np.percentile(predictions, qLow, axis=1)
predictionsHigh = np.percentile(predictions, qHigh, axis=1)
predictionsPercentilesTS.append([predictionsLow, predictionsHigh])
predictionsMean = predictions.mean(axis=1)
return predictionsMean, predictionsPercentilesTS
def _castPredictionsAsTS(regionTSClean,
nDaysPredict,
predictionsMean,
predictionsPercentiles,
):
predictionsMeanTS = pd.Series(
index = pd.date_range(
start = regionTSClean.index[0],
end = regionTSClean.index[-1] + pd.Timedelta(nDaysPredict, 'D')
),
data = predictionsMean,
)
predictionsPercentilesTS = []
for qLow, qHigh in predictionsPercentiles:
predictionsLow = pd.Series(
index = pd.date_range(
start = regionTSClean.index[0],
end = regionTSClean.index[-1] + pd.Timedelta(nDaysPredict, 'D')
),
data = qLow,
)
predictionsHigh = pd.Series(
index = pd.date_range(
start = regionTSClean.index[0],
end = regionTSClean.index[-1] + pd.Timedelta(nDaysPredict, 'D')
),
data = qHigh,
)
predictionsPercentilesTS.append([predictionsLow, predictionsHigh])
return predictionsMeanTS, predictionsPercentilesTS
def predictLogisticGrowth(logGrowthModel: StanModel,
regionTrainIndex: int = None,
regionName: str = None,
confirmedCases = None,
target = 'confirmed',
subGroup = 'casesGlobal',
nSamples = N_SAMPLES,
nChains = N_CHAINS,
nDaysPredict = N_DAYS_PREDICT,
minCasesFilter = MIN_CASES_FILTER,
minNumberDaysWithCases = MIN_NUMBER_DAYS_WITH_CASES,
predictionsPercentiles = PREDICTIONS_PERCENTILES,
randomSeed = 2020,
**kwargs
):
"""Predict the region with the nth highest number of cases
Parameters
----------
logGrowthModel: A compiled pystan model
regionTrainIndex: Order countries from highest to lowest, and train the ith region
regionName: Overwrites regionTrainIndex as the region to train
confirmedCases: A dataframe of countries as columns, and total number of cases as a time series
(see covidvu.vujson.parseCSSE)
target: string in ['confirmed', 'deaths', 'recovered']
subGroup: A key in the output of covidvu.pipeline.vujson.parseCSSE
nSamples: Number of samples per chain of MCMC
nChains: Number of independent chains MCMC
nDaysPredict: Number of days ahead to predict
minCasesFilter: Minimum number of cases for prediction
minNumberDaysWithCases: Minimum number of days with at least minCasesFilter
predictionsPercentiles: Bayesian confidence intervals to evaluate
randomSeed: Seed for stan sampler
kwargs: Optional named arguments passed to covidvu.pipeline.vujson.parseCSSE
Returns
-------
regionTS: All data for the queried region
predictionsMeanTS: Posterior mean prediction
predictionsPercentilesTS: Posterior percentiles
    trace: posterior samples as a pandas DataFrame (the pystan fit converted with to_dataframe)
regionTSClean: Data used for training
"""
maxTreeDepth = kwargs.get('maxTreedepth', MAX_TREEDEPTH)
if confirmedCases is None:
confirmedCases = parseCSSE(target,
siteData = kwargs.get('siteData', SITE_DATA),
jhCSSEFileConfirmed = kwargs.get('jhCSSEFileConfirmed',JH_CSSE_FILE_CONFIRMED),
jhCSSEFileDeaths = kwargs.get('jhCSSEFileDeaths',JH_CSSE_FILE_DEATHS),
jhCSSEFileConfirmedDeprecated = kwargs.get('jhCSSEFileConfirmedDeprecated',
JH_CSSE_FILE_CONFIRMED_DEPRECATED),
jhCSSEFileDeathsDeprecated = kwargs.get('jhCSSEFileDeathsDeprecated',
JH_CSSE_FILE_DEATHS_DEPRECATED),
jsCSSEReportPath = kwargs.get('jsCSSEReportPath',JH_CSSE_REPORT_PATH),
)[subGroup]
if regionName is None:
regionName = _getCountryToTrain(int(regionTrainIndex), confirmedCases)
else:
assert isinstance(regionName, str)
regionTS = confirmedCases[regionName]
regionTSClean = regionTS[regionTS > minCasesFilter]
if regionTSClean.shape[0] < minNumberDaysWithCases:
return None
regionTSClean.index = pd.to_datetime(regionTSClean.index)
t = np.arange(regionTSClean.shape[0])
regionTSCleanLog = np.log(regionTSClean.values + 1)
logisticGrowthData = {'nDays': regionTSClean.shape[0],
't': list(t),
'casesLog': list(regionTSCleanLog)
}
fit = logGrowthModel.sampling(data=logisticGrowthData, iter=nSamples, chains=nChains, seed=randomSeed,
control={'max_treedepth':maxTreeDepth}
)
trace = fit.to_dataframe()
predictionsMean, predictionsPercentilesTS = _getPredictionsFromPosteriorSamples(t,
trace,
nDaysPredict,
predictionsPercentiles,
)
predictionsMeanTS, predictionsPercentilesTS = _castPredictionsAsTS(regionTSClean,
nDaysPredict,
predictionsMean,
predictionsPercentilesTS,
)
regionTS.index = pd.to_datetime(regionTS.index)
prediction = {
'regionTS': regionTS,
'predictionsMeanTS': predictionsMeanTS,
'predictionsPercentilesTS': predictionsPercentilesTS,
'trace': trace,
'regionTSClean': regionTSClean,
'regionName': regionName,
't': t,
}
return prediction
def _castDatetimeIndexToStr(timeSeries, dateCode = '%Y-%m-%d'):
timeSeries.index = timeSeries.index.map(lambda s: s.strftime(dateCode))
def _dumpTimeSeriesAsJSON(timeSeries, target = None):
assert isinstance(timeSeries.index, DatetimeIndex)
_castDatetimeIndexToStr(timeSeries)
result = {
timeSeries.name: timeSeries.to_dict(),
}
if target:
dumpJSON(result, target)
return result
def _dumpPredictionCollectionAsJSON(predictionsPercentilesTS,
regionName,
predictionsPercentiles,
target,
):
result = {}
for i, (qLow, qHigh) in enumerate(predictionsPercentiles):
tsLow = predictionsPercentilesTS[i][0]
tsHigh = predictionsPercentilesTS[i][1]
_castDatetimeIndexToStr(tsLow)
_castDatetimeIndexToStr(tsHigh)
result[qLow] = tsLow.to_dict()
result[qHigh] = tsHigh.to_dict()
result = {
regionName: result
}
if target:
dumpJSON(result, target)
return result
def _dumpRegionPrediction(prediction, siteData, predictionsPercentiles,
meanFilename=PREDICTION_MEAN_JSON_FILENAME_WORLD,
confIntFilename=PREDICTION_CI_JSON_FILENAME_WORLD,
):
regionNameSimple = ''.join(e for e in prediction['regionName'] if e.isalnum())
prediction['predictionsMeanTS'].name = prediction['regionName']
_dumpTimeSeriesAsJSON(prediction['predictionsMeanTS'],
join(siteData, meanFilename % regionNameSimple),
)
_dumpPredictionCollectionAsJSON(prediction['predictionsPercentilesTS'],
prediction['predictionsMeanTS'].name,
predictionsPercentiles,
join(siteData,
confIntFilename % regionNameSimple),
)
def predictRegions(regionTrainIndex,
target = 'confirmed',
predictionsPercentiles = PREDICTIONS_PERCENTILES,
siteData = SITE_DATA,
subGroup = 'casesGlobal',
jhCSSEFileConfirmed = JH_CSSE_FILE_CONFIRMED,
jhCSSEFileDeaths = JH_CSSE_FILE_DEATHS,
jhCSSEFileConfirmedDeprecated = JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jhCSSEFileDeathsDeprecated = JH_CSSE_FILE_DEATHS_DEPRECATED,
jsCSSEReportPath = JH_CSSE_REPORT_PATH,
priorLogCarryingCapacity = PRIOR_LOG_CARRYING_CAPACITY,
priorMidPoint = PRIOR_MID_POINT,
priorGrowthRate = PRIOR_GROWTH_RATE,
priorSigma = PRIOR_SIGMA,
logRegModel = None,
**kwargs
):
"""Generate forecasts for regions
Parameters
----------
regionTrainIndex: If an integer, trains the region ranked i+1 in order of total number of cases. If 'all',
predicts all regions
target: A string in ['confirmed', 'deaths', 'recovered']
predictionsPercentiles: The posterior percentiles to compute
siteData: The directory for output data
subGroup:
jhCSSEFileConfirmed:
jhCSSEFileDeaths
jhCSSEFileConfirmedDeprecated
jhCSSEFileDeathsDeprecated
jsCSSEReportPath
priorLogCarryingCapacity
priorMidPoint
priorGrowthRate
priorSigma
logRegModel
kwargs: Optional named arguments for covidvu.predictLogisticGrowth
Returns
-------
JSON dump of mean prediction and confidence intervals
"""
if logRegModel is None:
print('Building model. This may take a few moments...')
logRegModel = buildLogisticModel(priorLogCarryingCapacity= priorLogCarryingCapacity,
priorMidPoint = priorMidPoint,
priorGrowthRate = priorGrowthRate,
priorSigma = priorSigma,
)
print('Done.')
else:
assert isinstance(logRegModel, StanModel)
if re.search(r'^\d+$', str(regionTrainIndex)):
print(f'Training index {regionTrainIndex}')
prediction = predictLogisticGrowth(logRegModel,
regionTrainIndex = regionTrainIndex,
predictionsPercentiles = predictionsPercentiles,
target = target,
siteData = siteData,
jhCSSEFileConfirmed = jhCSSEFileConfirmed,
jhCSSEFileDeaths = jhCSSEFileDeaths,
jhCSSEFileConfirmedDeprecated = jhCSSEFileConfirmedDeprecated,
jhCSSEFileDeathsDeprecated = jhCSSEFileDeathsDeprecated,
jsCSSEReportPath = jsCSSEReportPath,
**kwargs
)
if subGroup == 'casesGlobal':
_dumpRegionPrediction(prediction, siteData, predictionsPercentiles)
elif subGroup == 'casesUSStates':
_dumpRegionPrediction(prediction, siteData, predictionsPercentiles,
meanFilename=PREDICTION_MEAN_JSON_FILENAME_US,
confIntFilename=PREDICTION_CI_JSON_FILENAME_US,)
else:
raise NotImplementedError
print('Done.')
elif regionTrainIndex == 'all':
confirmedCases = parseCSSE(target,
siteData = siteData,
jhCSSEFileConfirmed = jhCSSEFileConfirmed,
jhCSSEFileDeaths = jhCSSEFileDeaths,
jhCSSEFileConfirmedDeprecated = jhCSSEFileConfirmedDeprecated,
jhCSSEFileDeathsDeprecated = jhCSSEFileDeathsDeprecated,
jsCSSEReportPath = jsCSSEReportPath,
)[subGroup]
countriesAll = confirmedCases.columns[confirmedCases.columns.map(lambda c: c[0]!='!')]
for regionName in countriesAll:
print(f'Training {regionName}...')
prediction = predictLogisticGrowth(logRegModel,
regionName = regionName,
confirmedCases = confirmedCases,
predictionsPercentiles = predictionsPercentiles,
target = target,
siteData = siteData,
jhCSSEFileConfirmed = jhCSSEFileConfirmed,
jhCSSEFileDeaths = jhCSSEFileDeaths,
jhCSSEFileConfirmedDeprecated = jhCSSEFileConfirmedDeprecated,
jhCSSEFileDeathsDeprecated = jhCSSEFileDeathsDeprecated,
jsCSSEReportPath = jsCSSEReportPath,
**kwargs,
)
if prediction:
if subGroup == 'casesGlobal':
_dumpRegionPrediction(prediction, siteData, predictionsPercentiles)
elif subGroup == 'casesUSStates':
_dumpRegionPrediction(prediction, siteData, predictionsPercentiles,
meanFilename=PREDICTION_MEAN_JSON_FILENAME_US,
confIntFilename=PREDICTION_CI_JSON_FILENAME_US, )
else:
raise NotImplementedError
print('Saved.')
else:
print('Skipped.')
print('Done.')
else:
raise NotImplementedError
def getSavedShortCountryNames(siteData = SITE_DATA,
confIntFilename=PREDICTION_CI_JSON_FILENAME_WORLD,
):
regionNameShortAll = []
    pattern = '^' + confIntFilename.replace('%s', r'(.*\w)')
for filename in os.listdir(siteData):
match = re.search(pattern, filename)
if match:
regionNameShort = match.groups()[0]
regionNameShortAll.append(regionNameShort)
return regionNameShortAll
def load(regionIndex = None,
regionNameShort = None,
siteData=SITE_DATA,
meanFilename=PREDICTION_MEAN_JSON_FILENAME_WORLD,
confIntFilename=PREDICTION_CI_JSON_FILENAME_WORLD,):
if regionNameShort is None:
assert isinstance(regionIndex, int)
regionNameShortAll = getSavedShortCountryNames(siteData=siteData)
regionNameShort = regionNameShortAll[regionIndex]
assert abs(regionIndex) < len(regionNameShortAll)
else:
assert isinstance(regionNameShort, str)
with open(join(siteData, confIntFilename % regionNameShort)) as jsonFile:
confidenceIntervals = json.load(jsonFile)
with open(join(siteData, meanFilename % regionNameShort)) as jsonFile:
meanPrediction = json.load(jsonFile)
meanPredictionTS = pd.Series(list(meanPrediction.values())[0])
meanPredictionTS.index =
|
pd.to_datetime(meanPredictionTS.index)
|
pandas.to_datetime
|
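A short sketch of pandas.to_datetime, the API this record targets: the string keys of a JSON-loaded series are converted into a proper DatetimeIndex, as the load() helper does for the mean prediction.
import pandas as pd

s = pd.Series({"2020-03-01": 10.0, "2020-03-02": 12.5})
s.index = pd.to_datetime(s.index)  # object index -> DatetimeIndex
print(s.index.dtype)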
import pandas as pd
df2 = pd.DataFrame()
for comuna in ['macul','nunoa','providencia','santiago','la_granja','san_joaquin']:
geocode = comuna+'_geocode.csv'
comuna = comuna+'.csv'
df1 = pd.read_csv(comuna)
df0 = pd.read_csv(geocode)
aux = pd.concat([df0,df1[['Mesa']]],axis=1)
df2 = df2.append(aux, ignore_index=True)
df2.comuna.replace({
'LaGranja':'LA GRANJA',
'SanJoaquin':'<NAME>',
'Santiago':'SANTIAGO',
'Ñuñoa':'ÑUÑOA',
'Macul':'MACUL',
'Providencia':'PROVIDENCIA'
},inplace=True)
df3 = pd.read_csv('Diputados 2017_resultados_d10.csv')
df4 = pd.read_csv('Intención de Votos - Diputados.csv',header=None)
df5 =
|
pd.merge(df3,df4[[2,3]], left_on='Candidato',right_on=2)
|
pandas.merge
|
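A hypothetical sketch of pandas.merge, the API this record targets, joining a results frame to a header-less frame on mismatched key columns; the data here is invented for illustration.
import pandas as pd

left = pd.DataFrame({"Candidato": ["A", "B"], "votos": [100, 80]})
right = pd.DataFrame({2: ["A", "B"], 3: ["lista1", "lista2"]})  # integer column labels, as with header=None
merged = pd.merge(left, right[[2, 3]], left_on="Candidato", right_on=2)
print(merged)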
from requests import Session
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '\
'AppleWebKit/537.36 (KHTML, like Gecko) '\
'Chrome/75.0.3770.80 Safari/537.36'}
def search_symbol(symbol):
"""
Search for symbol's link in Simply Wall Street
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
url = f'https://api.simplywall.st/api/search/{symbol}'
# Request and transform response in json
screener = s.get(url)
json = screener.json()
if len(json) != 0:
# Stock URL
stock_url = json[0]['url']
else:
stock_url = 'not found'
return stock_url
def extract_all_urls(stocks_path='../docs/my_stocks.feather'):
"""
Create file with urls to call api
"""
# Read csv with stocks
my_stocks_df = pd.read_feather(stocks_path)
# Create List with stocks
my_stocks_list = list(my_stocks_df['symbol'].unique())
# Find all urls and store in a dataframe
results = []
for stock in my_stocks_list:
print(stock)
url = search_symbol(stock)
results.append([stock, url])
# Convert into a dataframe
results_df = pd.DataFrame(results, columns=['symbol', 'url'])
# Export to csv
results_df.to_csv('../docs/simplywallurls.csv', index=0)
return results_df
def symbol_data(stock_url):
"""
    Extract data from Simply Wall Street
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
metrics_url = f'https://api.simplywall.st/api/company{stock_url}?include=info%2Cscore%2Cscore.snowflake%2Canalysis.extended.raw_data%2Canalysis.extended.raw_data.insider_transactions&version=2.0'
# Request and transform response in json
screener = s.get(metrics_url)
# check status
if screener.status_code == 200:
json = screener.json()
else:
json = 'not found'
return json
def extract_values(json_response, symbol):
"""
Extract important values from json_response for each symbol
"""
# Define important fields
fields_dictionary = {'total_assets': 'total_assets',
'total_current_assets': 'total_ca',
'cash_st_investments': 'cash_st_invest',
'total_receivables': 'total_receiv',
'inventory': 'inventory',
'net_property_plant_equip': 'nppe',
'short_term_debt': 'current_port_capital_leases',
'total_current_liabilities': 'total_cl',
'long_term_debt': 'lt_debt',
'total_liabilities': 'total_liabilities',
'total_equity': 'total_equity',
'accounts_payable': 'ap',
'total_revenue_ttm': 'total_rev',
'ebt_ttm':'ebt',
'ebitda_ttm': 'ebitda',
'ebit_ttm': 'ebit',
'pre_tax_income': 'earning_co',
'gross_profit_ttm': 'gross_profit',
'net_income_ttm': 'ni',
'g_a_expense_ttm': 'g_a_expense',
'income_tax_ttm': 'income_tax',
'interest_exp_ttm': 'interest_exp',
'basic_eps_ttm': 'basic_eps',
'net_oper_cf_ttm': 'cash_oper',
'net_investing_cf_ttm': 'cash_f_investing',
'net_financing_cf_ttm': 'cash_f_financing',
'levered_fcf_ttm': 'levered_fcf',
'capex_ttm': 'capex',
'beta_5yr': 'beta_5yr'}
# Check response code
if json_response != 'not found':
# Get to fields that really matter
assets = json_response['data']['analysis']['data']['extended']['data']['raw_data']['data']['past']
# check if there's data
if len(assets) > 0:
# Extract Available dates
dates = assets.keys()
# Create empty list to store results
results = []
# Create first row with headers
headers = []
headers.append('date')
headers.append('symbol')
[headers.append(row) for row in list(fields_dictionary.keys())]
results.append(headers)
# For each date in dates
for date in dates:
# Create Temporary list to append results for each date
temp_results = []
temp_results.append(date)
temp_results.append(symbol)
# See available keys - not all fields are available all the time
available_keys = assets[date].keys()
# For field in list of fields to pull
for field in fields_dictionary.values():
# if field is available
if field in available_keys:
# create value and append that
value = assets[date][field]['value']
temp_results.append(value)
# if field doesn't exist then append NaN
else:
temp_results.append(np.nan)
# Append to results
results.append(temp_results)
return results
else:
return 'not found'
def extract_fundamentals(update_urls=False, urls_path='../docs/simplywallurls.csv'):
"""
Function to extract all fundamentals for all stocks
"""
# Check if we need to update list of urls
if update_urls == False:
# Read csv with stocks
urls_df = pd.read_csv(urls_path, header=0)
else:
urls_df = extract_all_urls()
# Create variable with total number of stocks so we can track progress
length = len(urls_df)
# create list to store results
results = []
# Loop through symbols
for index, row in urls_df.iterrows():
# Extract values
stock_url = row['url']
symbol = row['symbol']
# Print progress
print( str( round((((index + 1) / length) * 100), 2)) + '% Complete', symbol)
# If url is different than 'not found'
if row['url'] != 'not found':
# Extract json with values
stock_json_response = symbol_data(stock_url)
# Check if there's data
if stock_json_response != 'not found':
                # Keep only relevant values
stock_numbers = extract_values(stock_json_response, symbol)
# Add that to results list
results.append(stock_numbers)
# Transform results into a dataframe, first create a list where every row is one record for each stock
to_df_list = [i for stock in results for i in stock]
# Convert it to a dataframe - dropping duplicates for headers (not the best solution)
df = pd.DataFrame(to_df_list, columns=to_df_list[0]).drop_duplicates()
# Remove first row with headers
df = df[1:]
# Export that
df.to_csv('../docs/my_stocks_fundamentals.csv', index=0)
return df
def update_fundamental_dates():
"""
Function to update fundamental data from Simply Wall Street
"""
# Import Fundamental Data and Earnings
df_fund =
|
pd.read_csv('../docs/my_stocks_fundamentals.csv')
|
pandas.read_csv
|
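A minimal sketch of pandas.read_csv as this record uses it to reload the exported fundamentals file; the inline CSV text and the parse_dates choice are assumptions so the snippet runs standalone.
import io
import pandas as pd

csv_text = "date,symbol,total_assets\n2019-12-31,XYZ,123.4\n"
df_fund = pd.read_csv(io.StringIO(csv_text), parse_dates=["date"])
print(df_fund.dtypes)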
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
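A brief sketch of pandas._testing.assert_frame_equal, the API this record targets; note that pandas._testing is an internal module and pandas.testing.assert_frame_equal is the public counterpart.
import pandas as pd
import pandas._testing as tm

left = pd.DataFrame({"a": [1, 2]})
right = left.copy()
tm.assert_frame_equal(left, right)  # passes silently; raises AssertionError on any mismatch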
from collections import defaultdict
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'tagsets'])
class MetaData(BaseEstimator, TransformerMixin):
def count_meta_data(self, text):
"""
Returns the counts of different meta data based on the text
Args:
text (str): The text for which the meta data needs to generated
Returns:
dictionary: A dictionary with keys as the different meta data and values as their count
"""
counter = defaultdict(int)
# tokenize by sentences
sentence_list = sent_tokenize(text)
for sentence in sentence_list:
# tokenize each sentence into words and tag part of speech
pos_tags = nltk.pos_tag(word_tokenize(sentence))
            # check each tagged word and keep a count per part of speech
for _, tag in pos_tags:
if tag.startswith('JJ'):
counter['adjective'] += 1
elif tag.startswith('NN'):
counter['noun'] += 1
elif tag.startswith('PRP'):
counter['pronoun'] += 1
elif tag.startswith('RB'):
counter['adverb'] += 1
elif tag.startswith('VB'):
counter['verb'] += 1
return counter
def fit(self, x):
return self
def transform(self, X):
"""
Returns a dataframe containing the meta data about the input text
Args:
X (numpy.array): A numpy array containing text for which meta data needs to generated
Returns:
DataFrame: A dataframe containing the meta data
"""
# apply count meta data for each text
X_tagged =
|
pd.Series(X)
|
pandas.Series
|
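A small sketch of pandas.Series, the API this record targets: wrapping the input array lets apply() run the per-text counter, after which the dicts expand into a feature frame; the toy texts are invented.
import pandas as pd

texts = ["The quick brown fox jumps.", "Pandas makes tabular data easy."]
counts = pd.Series(texts).apply(lambda t: {"words": len(t.split())})
features = pd.DataFrame(list(counts))  # one row of counts per input text
print(features)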
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
import sympy as sp
from sympy import Matrix, Symbol
from sympy.utilities.lambdify import lambdify
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
# Seaborn, useful for graphics
import seaborn as sns
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
rc = {'lines.linewidth': 1,
'axes.labelsize': 20,
'axes.titlesize': 20,
'legend.fontsize': 26,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
# Functions
def calc_V_bcc(a):
return a**3
def calc_V_hcp(a,c):
return (np.sqrt(3)/2)*a**2*c
def calc_dV_bcc(a,da):
return 3*a**2*da
def calc_dV_hcp(a,c,da,dc):
return np.sqrt( (np.sqrt(3)*a*c*da)**2 + ((np.sqrt(3)/2)*a**2*dc)**2 )
# Numeric Vinet EOS, used for everything except calculating dP
def VinetEOS(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * np.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Symbolic Vinet EOS, needed to calculate dP
def VinetEOS_sym(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * sp.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Create a covariance matrix from EOS_df with V0, K0, and K0prime; used to get dP
def getCov3(EOS_df, phase):
dV0 = np.float(EOS_df[EOS_df['Phase'] == phase]['dV0'])
dK0 = np.float(EOS_df[EOS_df['Phase'] == phase]['dK0'])
dKprime0 = np.float(EOS_df[EOS_df['Phase'] == phase]['dKprime0'])
V0K0_corr = np.float(EOS_df[EOS_df['Phase'] == phase]['V0K0 corr'])
V0Kprime0_corr = np.float(EOS_df[EOS_df['Phase'] == phase]['V0Kprime0 corr'])
K0Kprime0_corr = np.float(EOS_df[EOS_df['Phase'] == phase]['K0Kprime0 corr'])
corr_matrix = np.eye(3)
corr_matrix[0,1] = V0K0_corr
corr_matrix[1,0] = V0K0_corr
corr_matrix[0,2] = V0Kprime0_corr
corr_matrix[2,0] = V0Kprime0_corr
corr_matrix[1,2] = K0Kprime0_corr
corr_matrix[2,1] = K0Kprime0_corr
sigmas = np.array([[dV0,dK0,dKprime0]])
cov = (sigmas.T@sigmas)*corr_matrix
return cov
# Create a covariance matrix with V, V0, K0, and K0prime; used to get dP
def getVinetCov(dV, EOS_df, phase):
cov3 = getCov3(EOS_df, phase)
cov = np.eye(4)
cov[1:4,1:4] = cov3
cov[0,0] = dV**2
return cov
def calc_dP_VinetEOS(V, dV, EOS_df, phase):
# Create function for Jacobian of Vinet EOS
a,b,c,d = Symbol('a'),Symbol('b'),Symbol('c'),Symbol('d') # Symbolic variables V, V0, K0, K'0
Vinet_matrix = Matrix([VinetEOS_sym(a,b,c,d)]) # Create a symbolic Vinet EOS matrix
param_matrix = Matrix([a,b,c,d]) # Create a matrix of symbolic variables
# Symbolically take the Jacobian of the Vinet EOS and turn into a column matrix
J_sym = Vinet_matrix.jacobian(param_matrix).T
# Create a numpy function for the above expression
# (easier to work with numerically)
J_Vinet = lambdify((a,b,c,d), J_sym, 'numpy')
J = J_Vinet(V,*getEOSparams(EOS_df, phase)) # Calculate Jacobian
cov = getVinetCov(dV, EOS_df, phase) # Calculate covariance matrix
dP = (J.T@cov@J).item() # Calculate uncertainty and convert to a scalar
return dP
def getEOSparams(EOS_df, phase):
V0 = np.float(EOS_df[EOS_df['Phase'] == phase]['V0'])
K0 = np.float(EOS_df[EOS_df['Phase'] == phase]['K0'])
Kprime0 = np.float(EOS_df[EOS_df['Phase'] == phase]['Kprime0'])
return V0, K0, Kprime0
def calc_rho(V,dV,M):
# Convert from cubic angstroms to cm^3/mol
V_ccpermol = (V/2)*constants.N_A/(10**24)
rho = M/V_ccpermol
drho = (M*2*10**24/constants.N_A)*(dV/(V**2))
return rho, drho
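# Hedged sanity check with purely illustrative EOS parameters (not values from the imported table):
# VinetEOS(V=20.0, V0=22.0, K0=160.0, Kprime0=5.5) returns a positive pressure since V < V0,
# and VinetEOS(V=22.0, V0=22.0, K0=160.0, Kprime0=5.5) returns 0 at the reference volume.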
# Import EOS information
EOS_df =
|
pd.read_csv('FeAlloyEOS.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 13:57:46 2020
@author: erhan
"""
import pandas as pd
import numpy as np
import os.path
import matplotlib.pyplot as plt
# data from 2019.07 to 2020.08
for days in range(31):
data_file_name = "../collected_data/2020 08 %02d_data_erhan.txt" % (days+1)
if(os.path.isfile(data_file_name)):
data =
|
pd.read_csv(data_file_name,sep=':',header=None)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
def filter_data(data,field,start,end):
return data[(data[field] >= start) & (data[field] <= end)]
def create_yearly_data(data):
grouped = data.groupby('year')
yearly_sum = grouped.sum()
yearly_sum['count'] = grouped.size()
return yearly_sum
def create_counts_data(data,column):
return data.groupby(column).size()
def remove_outliers(data, column, num_deviations=2):
column_data = data[column]
valid_data = column_data[np.abs(column_data-column_data.mean())<=(num_deviations*column_data.std())]
return valid_data
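# Toy example (illustrative data):
# df = pd.DataFrame({'year': [2000, 2005, 2010], 'value': [1.0, 2.0, 100.0]})
# filter_data(df, 'year', 2001, 2010) keeps the 2005 and 2010 rows, and
# remove_outliers(df, 'value') returns the 'value' column with entries more than
# 2 standard deviations from the mean removed.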
def create_binned_data(data,column,num_bins=100):
result =
|
pd.cut(data,bins=num_bins,include_lowest=True)
|
pandas.cut
|
"""
Diversity-aware within- and Out-of-distribution performance evaluation.
"""
from time import time
import itertools
from collections import defaultdict
import numpy as np
import pandas as pd
from typing import List, Tuple, Optional, Iterator, Any
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LinearRegression
from propensity_score import estimate_ps, match_ps, stratify_ps
from sampling import (split_strata, get_indices, get_indices_cv,
get_indices_null)
from validation import cross_validate
def _add_indices_cont(df_match: pd.DataFrame, n_train_strata: int, n_add: int,
random_state: int = 0) \
-> Iterator[Tuple[np.ndarray, np.ndarray]]:
g = df_match.groupby('pair').mean().sort_values(['strata', 'ps'])
g.reset_index(inplace=True)
n_train_pairs = g.groupby('strata').count().min().iloc[0] * n_train_strata
pairs_discard = g.groupby('strata').first().pair
candidates = np.setdiff1d(g.iloc[:-n_train_pairs].pair, pairs_discard)
rs = np.random.RandomState(random_state)
selected = rs.permutation(candidates)[:n_add]
# contiguous chunk of pairs for training
idx_start = g[g.pair.isin(selected)].index
idx_end = idx_start + n_train_pairs
idx = df_match.index.to_numpy()
for i, j in zip(idx_start, idx_end):
mtrain = df_match.pair.isin(g.iloc[i:j].pair)
yield idx[mtrain], idx[~mtrain]
def _eval_samp_one(clf: Any, x: np.ndarray, y: np.ndarray,
df_match: pd.DataFrame, n_train_strata: int, kind: str,
n_splits: int = 10, n_draws: int = 20, n_jobs: int = 1) \
-> Tuple[np.ndarray, dict]:
n_draws = -np.abs(n_draws) if kind == 'div' else np.abs(n_draws)
kwds = dict(n_train_strata=n_train_strata, n_draws=n_draws)
# Out-of-distribution
if kind in ['cont', 'div']:
_, dict_strata = split_strata(df_match, **kwds)
idx_ood = get_indices(df_match, dict_strata, join=True)
strata_train = dict_strata['train']
is_strata = True
# Add draws if necessary
n_rem = n_draws - strata_train.shape[0]
if kind == 'cont' and n_rem > 0:
idx_ood = list(idx_ood)
idx_ood2 = _add_indices_cont(df_match, n_train_strata, n_rem)
idx_ood += list(idx_ood2)
strata_train = [idx[0] for idx in idx_ood]
is_strata = False
else: # random sampling scheme
idx_ood = list(get_indices_null(df_match, **kwds))
strata_train = [idx[0] for idx in idx_ood]
is_strata = False
coef, score = cross_validate(clf, x, y, df_match, idx_ood, n_jobs=n_jobs)
# Within-distribution - CV
idx_cv = get_indices_cv(df_match, strata_train, is_strata=is_strata,
n_splits=n_splits)
sc = cross_validate(clf, x, y, df_match, idx_cv, n_jobs=n_jobs)[1]
idx = pd.Index(np.repeat(range(len(strata_train)), n_splits),
name='draw')
df = sc['default']['test'].droplevel('draw').set_index(idx, append=True)
score['default']['cv'] = df.groupby(['set', 'draw', 'chunk']).mean()
return coef, score
def evaluate_sampling(clf: Any, x: np.ndarray, y: np.ndarray,
df_match: pd.DataFrame,
kinds: Optional[List[str]] = None, n_draws: int = 20,
list_train_strata: Optional[list] = None,
n_splits: int = 10, n_jobs: int = 1,
verbose: bool = False) -> Tuple[dict, dict]:
"""Evaluate sampling schemes.
Parameters
----------
clf: estimator object
Sklearn-like predictive model with `coef_` attribute.
x: np.ndarray of shape (n_subjects, n_features)
Data to use for prediction.
y: np.ndarray of shape (n_subjects,)
Binary class label (e.g., diagnosis).
df_match: pd.DataFrame
Dataframe with matched subjects.
kinds: list of str, default=None
List of possible sampling schemes:
- 'cont': contiguous strata for training and remaining for test
- 'div': (at least) one non-contiguous strata for training
- 'null': random splitting of paired subjects in train/test
If None, evaluate all sampling schemes.
n_draws: int, default=20
Number of draws (aka train/test strata splits)
list_train_strata: list of int, default=None
List with different numbers of strata to use for training.
Evaluations are repeated for each number of training strata (remaining
strata are used for test). If None, use ``range(2, n_strata-1)``.
n_splits: int, default=10
Number of CV splits based only on train strata (aka
within-distribution performance).
n_jobs: int, default=1
Number of jobs to run in parallel.
verbose: bool, default=False
Verbosity.
Returns
-------
coef: dict
Dictionary of model coefficients for each sampling scheme in `kind`.
scores: dict
Performance scores for each sampling scheme in `kind`.
Scores for within (CV in train strata) and out-of-distribution (test
strata).
"""
kwds = dict(n_splits=n_splits, n_draws=np.abs(n_draws), n_jobs=n_jobs)
if list_train_strata is None:
n_strata = df_match.strata.nunique()
list_train_strata = np.arange(2, n_strata-1)
# Compare contiguous, diverse and random (aka null) sampling schemes
if kinds is None:
kinds = ['cont', 'div', 'null']
score = {k: defaultdict(list) for k in kinds}
coef = defaultdict(list)
for k, ns in itertools.product(kinds, list_train_strata):
t1 = time()
c, sc = _eval_samp_one(clf, x, y, df_match, ns, k, **kwds)
coef[k].append(c)
for s, v in sc['default'].items():
score[k][s].append(v)
if verbose:
print('{:<14} [{:>2}]: {:.3f}s'.format(k, ns, time()-t1))
splits = score['cont'].keys()
for k1, k2 in itertools.product(kinds, splits):
score[k1][k2] = pd.concat(score[k1][k2], keys=list_train_strata,
names=['n_train_strata'])
new_score = {}
for s in splits:
sc = pd.concat({k: v[s] for k, v in score.items()}, axis=1,
names=['kind'])
new_score[s] = sc.reorder_levels([1, 0], axis=1).sort_index(axis=1)
return coef, new_score
def evaluate_diversity(clf: Any, x: np.ndarray, y: np.ndarray,
df_match: pd.DataFrame, n_train_strata: int = 5,
keys_dissect: Optional[List[str]] = None,
n_splits: int = 10, n_jobs: int = 1,
verbose: bool = False) \
-> Tuple[pd.DataFrame, np.ndarray, dict]:
"""Evaluate classification performance vs diversity.
Parameters
----------
clf: estimator object
Sklearn-like predictive model with `coef_` attribute.
x: np.ndarray of shape (n_subjects, n_features)
Data to use for prediction.
y: np.ndarray of shape (n_subjects,)
Binary class label (e.g., diagnosis).
df_match: pd.DataFrame
Dataframe with matched subjects.
n_train_strata: int, default=5
Number of strata to use for training, remaining are used for test.
keys_dissect: list of str, default=None
Covariate names used to dissect performance. Only works with
categorical covariates (e.g., scan acquisition site, sex). If None, performance is not dissected.
n_splits: int, default=10
Number of CV splits based only on train strata (aka
within-distribution performance).
n_jobs: int, default=1
Number of jobs to run in parallel.
verbose: bool, default=False
Verbosity.
Returns
-------
df_strata: pd.DataFrame
Dataframe where each row represent a train/test split of strata.
See split_strata.
coef: dict
Dictionary of model coefficients for each sampling scheme in `kind`.
scores: dict
Performance scores for each sampling scheme in `kind`.
Scores for within (CV in train strata) and out-of-distribution (test
strata).
"""
# Generate all possible splits of n_train_strata strata for training
# and the remaining for test
df_strata, dict_strata = split_strata(df_match,
n_train_strata=n_train_strata)
t1 = time()
# Within-distribution - CV
idx = get_indices_cv(df_match, dict_strata['train'], n_splits=n_splits,
is_strata=True)
score_cv = cross_validate(clf, x, y, df_match, idx, n_jobs=n_jobs)[1]
t2 = time()
if verbose:
print(f'Within-distribution elapsed time: {t2-t1:.2f}s')
# Out-of-distribution
idx = list(get_indices(df_match, dict_strata))
coef, score = cross_validate(clf, x, y, df_match, idx, n_jobs=n_jobs,
keys_dissect=keys_dissect)
t3 = time()
if verbose:
print(f'Out-of-distribution elapsed time: {t3-t2:.2f}s')
score_cv = score_cv['default']['test']
score_cv = score_cv.reset_index(level='draw', drop=False)
score_cv['draw'] //= n_splits
score_cv = score_cv.set_index('draw', append=True)
score['default']['cv'] = score_cv.groupby(['set', 'draw', 'chunk']).mean()
return df_strata, coef, score
def decounfound(dec: str, df_conf: pd.DataFrame, x: np.ndarray,
site_col: Optional[str] = None,
cat: Optional[List[str]] = None) -> np.ndarray:
"""Deconfounding.
Parameters
----------
dec: {'rout', 'combat'}
Deconfounding approach.
df_conf: pd.DataFrame
Dataframe with confounds.
x: np.ndarray of shape (n_subjects, n_features)
Data to deconfound.
site_col: str, default=None
Column in `df_conf` holding acquisition site.
cat: list of str, default=None
Categorical covariates in `df_conf` that are not site. If None,
assumes no categorical columns in dataframe.
Returns
-------
x_dec: np.ndarray of shape (n_subjects, n_features)
Deconfounded data.
"""
cat = [] if cat is None else cat
if dec == 'combat' and site_col is None:
raise ValueError("Combat requires site information: site_col is None")
cat_site = cat if site_col is None else cat + [site_col]
scale_keys = np.setdiff1d(df_conf.columns, cat_site)
if dec == 'rout':
ct = make_column_transformer((MinMaxScaler(), scale_keys),
(OneHotEncoder(drop='first'), cat_site))
xconf = ct.fit_transform(df_conf)
logit = LinearRegression(normalize=False)
clf = logit.fit(xconf, x)
return x - (xconf @ clf.coef_.T)
if dec == 'combat':
from neuroCombat import neuroCombat
x = neuroCombat(
|
pd.DataFrame(x.T)
|
pandas.DataFrame
|
import plotly.express as px
import base64
import dash
import io
import pandas as pd
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from layout_module import main_layout
import time
from tabs_module import tab2_content, tab1_content,tab3_content
import plotly.graph_objects as go
app = dash.Dash(external_stylesheets=[dbc.themes.SIMPLEX],suppress_callback_exceptions=True)
#LITERA SOLAR SLATE DARKLY COSMO FLATLY JOURNAL LUMEN MATERIA PULSE SANDSTONE
server = app.server
app.layout = main_layout
@app.callback(
Output("loading-1", 'children'),
Input(component_id='data-store', component_property='data'),
prevent_initial_callbacks=True
)
def spinners(data):
"""Handles the pre-loading functionality"""
time.sleep(1)
return
@app.callback(
Output(component_id='bins-html', component_property='style'),
Output(component_id='size-html', component_property='style'),
Output(component_id='symbol-html', component_property='style'),
Output(component_id='barmode-html', component_property='style'),
Output(component_id='marginaly-html', component_property='style'),
Output(component_id='text-html', component_property='style'),
Output(component_id='marginalx-html', component_property='style'),
Output(component_id='boxmode-html', component_property='style'),
Output(component_id='violinmode-html', component_property='style'),
Output(component_id='linegroup-html', component_property='style'),
Output(component_id='box_boolean-html', component_property='style'),
Output(component_id='show_points-html', component_property='style'),
Input('chart-type-dropdown', component_property='value')
)
def hide_options(chart_type):
if chart_type == 'Scatterplot':
return {'display':'none'},{'display':'block'},{'display':'block'}, {'display': 'none'},{'display':'block'},{'display':'block'},{'display':'block'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'}, {'display': 'none'}, {'display': 'none'}
if chart_type == 'Lineplot':
return {'display':'none'},{'display':'none'},{'display':'none'}, {'display': 'none'},{'display':'none'},{'display':'none'},{'display':'none'}, {'display': 'none'}, {'display': 'none'},{'display':'block'}, {'display': 'none'}, {'display': 'none'}
if chart_type=='Histogram':
return {'display': 'block'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'},{'display':'none'},{'display':'none'},{'display':'block'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'}, {'display': 'none'}, {'display': 'none'}
if chart_type=='Bar Charts':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'},{'display': 'block'},{'display':'none'},{'display':'none'},{'display':'none'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'}, {'display': 'none'}, {'display': 'none'}
if chart_type=='Boxplot':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'},{'display':'none'},{'display':'none'},{'display':'none'}, {'display': 'block'}, {'display': 'none'},{'display': 'none'}, {'display': 'none'}, {'display': 'none'}
if chart_type=='Violinplot':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'},{'display':'none'},{'display':'none'},{'display':'none'}, {'display': 'none'}, {'display': 'block'},{'display': 'none'}, {'display': 'block'}, {'display': 'block'}
if chart_type=='Density Contour Charts':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'},{'display':'block'},{'display':'none'},{'display':'block'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'}, {'display': 'none'}, {'display': 'none'}
if chart_type=='Density Heatmap':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'},{'display':'block'},{'display':'none'},{'display':'block'}, {'display': 'none'}, {'display': 'none'},{'display': 'none'}, {'display': 'none'}, {'display': 'none'}
@app.callback(
Output(component_id='data-store', component_property='data'),
Output(component_id='upload-status', component_property='children'),
Input(component_id='upload-widget',component_property='contents'),
Input(component_id='upload-widget', component_property='filename'),
Input(component_id='upload-widget', component_property='last_modified')
)
def upload_data(contents, filename,last_modified):
df = pd.DataFrame([])
success_message = str('')
try:
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
# timestamp = datetime.datetime.fromtimestamp(last_modified)
if 'csv' in filename:
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
# print(df.describe())
elif 'xlsx' in filename:
pass
else:
return None
success_message = 'file with name ' +str(filename) + " uploaded."
except Exception as e:
print(e)
return df.to_json( orient='split'),success_message
@app.callback(
Output(component_id='data-table', component_property='columns'),
Output(component_id='data-table', component_property='data'),
Input(component_id='data-store', component_property='data')
)
def view_data(json_data):
df = pd.read_json(json_data, orient='split')
df = df.head(5)
column_dict = [{'name':col, 'id':col} for col in df.columns]
data = df.to_dict('records')
return column_dict,data
@app.callback(
Output(component_id='x-axis', component_property='options'),
Output(component_id='y-axis', component_property='options'),
Output(component_id='size', component_property='options'),
Output(component_id='color', component_property='options'),
Output(component_id='text', component_property='options'),
Output(component_id='facetrow', component_property='options'),
Output(component_id='facetcolumn', component_property='options'),
Output(component_id='hover-text', component_property='options'),
Output(component_id='symbol', component_property='options'),
Output(component_id='linegroup-axis', component_property='options'),
Input(component_id='data-table', component_property='data')
)
def generate_options(data_dict):
df =
|
pd.DataFrame(data_dict)
|
pandas.DataFrame
|
import pandas as pd
def get_correlated_features(X, threshold, consider_sign=False):
"""Calculates correlation between all feature pairs in the input data.
Returns feature pairs having correlation higher than the threshold value.
Parameters
----------
X : pandas.DataFrame
numeric feature set used for EDA analysis
threshold : float
threshold for correlation above which feature pairs will be returned
consider_sign : boolean (optional)
determines whether the correlation value is checked for magnitude
only or also for sign (positive/negative). Default checks only the magnitude.
Returns
-------
pandas.DataFrame
dataframe containing feature1, feature2, and corresponding correlation.
Examples
-------
>>> import pandas as pd
>>> X = pd.DataFrame({"age": [23, 13, 7, 45],
"height": [1.65, 1.23, 0.96, 1.55],
"income": [20, 120, 120, 25]})
>>> get_correlated_features(X, threshold=0.7)
"""
if not isinstance(X, pd.DataFrame):
raise TypeError("Feature set (X) should be of pandas dataframe type!")
if not isinstance(threshold, float):
raise TypeError("Threshold value should be a floating point number!")
features = list(X.columns)
correlated_feat =
|
pd.DataFrame(columns=["feature-1", "feature-2", "correlation"])
|
pandas.DataFrame
|
# ---------------------------------
# Prepare the data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y contains the target values, test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test.csv')
# Save training and test datasets in their original form for explanations
train_x_saved = train_x.copy()
test_x_saved = test_x.copy()
# Function to recover original training and test datasets
def load_data():
train_x, test_x = train_x_saved.copy(), test_x_saved.copy()
return train_x, test_x
# Store names of categorical variables to be converted in list
cat_cols = ['sex', 'product', 'medical_info_b2', 'medical_info_b3']
# -----------------------------------
# One-hot encoding
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
# Concatenate the training and test datasets, and apply one-hot encoding via get_dummies()
all_x = pd.concat([train_x, test_x])
all_x = pd.get_dummies(all_x, columns=cat_cols)
# Resplit into training and test data
train_x = all_x.iloc[:train_x.shape[0], :].reset_index(drop=True)
test_x = all_x.iloc[train_x.shape[0]:, :].reset_index(drop=True)
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.preprocessing import OneHotEncoder
# Encoding with the OneHotEncoder() function
ohe = OneHotEncoder(sparse=False, categories='auto')
ohe.fit(train_x[cat_cols])
# Create column names for dummy variables
columns = []
for i, c in enumerate(cat_cols):
columns += [f'{c}_{v}' for v in ohe.categories_[i]]
# Put created dummy variables into data frames
dummy_vals_train = pd.DataFrame(ohe.transform(train_x[cat_cols]), columns=columns)
dummy_vals_test = pd.DataFrame(ohe.transform(test_x[cat_cols]), columns=columns)
# Join the remaining variables
train_x = pd.concat([train_x.drop(cat_cols, axis=1), dummy_vals_train], axis=1)
test_x = pd.concat([test_x.drop(cat_cols, axis=1), dummy_vals_test], axis=1)
# -----------------------------------
# Label encoding
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.preprocessing import LabelEncoder
# Loop over the categorical variables and apply label encoding
for c in cat_cols:
# Define labels based on the training data
le = LabelEncoder()
le.fit(train_x[c])
train_x[c] = le.transform(train_x[c])
test_x[c] = le.transform(test_x[c])
# -----------------------------------
# Feature hashing
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.feature_extraction import FeatureHasher
# Loop over the categorical variables and apply feature hashing
for c in cat_cols:
# Using the FeatureHasher() function is slightly different from other encoders
fh = FeatureHasher(n_features=5, input_type='string')
# Convert the variable to a string and apply the FeatureHasher() function
hash_train = fh.transform(train_x[[c]].astype(str).values)
hash_test = fh.transform(test_x[[c]].astype(str).values)
# Add to a data frame
hash_train = pd.DataFrame(hash_train.todense(), columns=[f'{c}_{i}' for i in range(5)])
hash_test = pd.DataFrame(hash_test.todense(), columns=[f'{c}_{i}' for i in range(5)])
# Join with the original data frame
train_x = pd.concat([train_x, hash_train], axis=1)
test_x = pd.concat([test_x, hash_test], axis=1)
# Drop the original categorical variable columns
train_x.drop(cat_cols, axis=1, inplace=True)
test_x.drop(cat_cols, axis=1, inplace=True)
# -----------------------------------
# Frequency encoding
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
# Loop over the categorical variables and apply frequency encoding
for c in cat_cols:
freq = train_x[c].value_counts()
# Replace each categorical variable with its frequency of occurrence
train_x[c] = train_x[c].map(freq)
test_x[c] = test_x[c].map(freq)
# -----------------------------------
# Target encoding
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.model_selection import KFold
# Loop over the categorical variables and apply target encoding
for c in cat_cols:
# Calculate the average of the target for each categorical value in the training data
data_tmp =
|
pd.DataFrame({c: train_x[c], 'target': train_y})
|
pandas.DataFrame
|
import json
import logging
import networkx as nx
import numpy as np
import pandas as pd
from more_itertools import unique_everseen
from statsmodels.nonparametric.smoothers_lowess import lowess
from pysrc.papers.utils import cut_authors_list, rgb2hex
logger = logging.getLogger(__name__)
class PlotPreprocessor:
@staticmethod
def component_sizes(df):
logger.debug('Processing component_sizes')
assigned_comps = df[df['comp'] >= 0]
d = dict(assigned_comps.groupby('comp')['id'].count())
return [int(d[k]) for k in range(len(d))]
@staticmethod
def component_size_summary_data(df, comps, min_year, max_year):
n_comps = len(comps)
components = [str(i + 1) for i in range(n_comps)]
years = list(range(min_year, max_year + 1))
data = {'years': years}
for c in range(n_comps):
data[str(c + 1)] = [len(df[np.logical_and(df['comp'] == c, df['year'] == y)])
for y in range(min_year, max_year + 1)]
return components, data
@staticmethod
def article_view_data_source(df, min_year, max_year, components_split, width=760):
columns = ['id', 'title', 'year', 'type', 'total', 'authors', 'journal', 'comp']
df_local = df[columns].copy()
# Correction of same year / total
df_local['count'] = 1 # Temporarily column to count clashes
if components_split:
df_counts = df_local[['comp', 'year', 'total', 'count']].groupby(['comp', 'year', 'total']).sum()
else:
df_counts = df_local[['year', 'total', 'count']].groupby(['year', 'total']).sum()
df_counts['delta'] = 0
dft =
|
pd.DataFrame(columns=columns + ['y'], dtype=object)
|
pandas.DataFrame
|
import logging
import time
import json
import mimetypes
import http.client
import warnings
import re
import spacy
import pandas as pd
import numpy as np
from langdetect import detect, DetectorFactory
from bs4 import BeautifulSoup
from tqdm.autonotebook import tqdm
# Add tqdm to pandas
tqdm.pandas(desc="Preprocess Data")
# Ignore warnings
warnings.filterwarnings('ignore')
# Configure logging
logging.basicConfig(format="%(asctime)s : %(levelname)s : %(message)s",
level=logging.INFO)
logger = logging.getLogger()
# Set seed for DetectorFactory
DetectorFactory.seed = 1
# NLP tokenizer and preprocessing
NLP = spacy.load('en_core_web_sm')
def get_regex_expression():
"""
Generate some regex expression
"""
# Match non alphanumeric characters
NON_ALPHANUMERIC_REGEX = r'[^a-zA-Z0-9À-ÿ\u00f1\u00d1\s]'
# Match any link or url from text
LINKS_REGEX = r'https?:\/\/.*[\r\n]'
# Match hashtags
HASHTAGS_REGEX = r'\#[^\s]*'
# Match twitter accounts
TWITTER_ACCOUNTS_REGEX = r'\@[^\s]*'
# Match Author:
AUTHOR_REGEX = r'author'
# Match email
EMAIL_REGEX = r"\S*@\S+"
# Group of regex
MATCHES_GROUPED = ('({})'.format(reg) for reg in [
LINKS_REGEX, HASHTAGS_REGEX, TWITTER_ACCOUNTS_REGEX, AUTHOR_REGEX,
EMAIL_REGEX, NON_ALPHANUMERIC_REGEX
])
# Regex for matches group
MATCHES_GROUPED_REGEX = r'{}'.format(('|'.join(MATCHES_GROUPED)))
return MATCHES_GROUPED_REGEX
REGEX = get_regex_expression()
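# Illustrative: re.sub(REGEX, ' ', 'Contact me@example.com or @user about #nlp', flags=re.M | re.I)
# strips the email address, the Twitter handle and the hashtag (placeholder text).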
def remove_unnecesary_text(text, regex):
"""
Remove unnecessary text using regex
Args:
text -- python string
regex -- python regex
Returns:
text -- python string
"""
return re.sub(regex, ' ', text, flags=re.M | re.I)
# Remove all whitespace characters
def remove_whitespace(text):
"""
Remove unnecessary whitespace
Args:
text -- python string
Returns:
text -- python string
"""
return ' '.join(text.split())
def preprocess_data(text, removing_stops=False, lemmatize=False):
"""
Preprocess string data.
Args:
text -- A Python string, typically taken from a column of a pandas dataframe
removing_stops -- Python boolean; if True, remove English stop words
lemmatize -- Python boolean; if True, lemmatize English words
Returns:
text -- The preprocessed string
"""
# Clean text
text = remove_whitespace(remove_unnecesary_text(text, REGEX))
# Tokenize the text of the blogs
tokens = NLP(text)
# Remove all punctuation marks
tokens = [token for token in tokens if not token.is_punct]
# Remove numbers or amount representation
tokens = [token for token in tokens if not token.like_num]
if removing_stops:
# Remove stopswords
tokens = [token for token in tokens if not token.is_stop]
if lemmatize:
# Lemmatize words
tokens = [token.lemma_.strip().lower() for token in tokens]
else:
# Convert to str and lowerize
tokens = [token.text.strip().lower() for token in tokens]
tokens = [token for token in tokens if len(token) > 1]
return " ".join(tokens)
def download_dataset(page):
"""
Download dataset and save to a python list
Args:
page -- last page scraped
Returns:
data_temp -- python list containing dict for each blog data
"""
sw = True
data_temp = []
numblog = 0
while sw:
try:
conn = http.client.HTTPSConnection("koombeastaging.wpengine.com")
conn.request("GET",
f"//wp-json/wp/v2/posts?page={page}&per_page=1")
res = conn.getresponse()
data = res.read()
data = json.loads(data)
numblog += len(data)
data_temp = data_temp + data
page += 1
if numblog % 20 == 0:
logger.info("Downloading blogs = {0}".format(numblog))
time.sleep(2)
except Exception as e:
logger.error("Error! {0}".format(e))
sw = False
last_page = page - 1
return data_temp, last_page
def clean_html(html_content):
"""
Clean html form of the data
Argument:
html_content -- Blog's content in html form
Returns:
clean_text -- python string containing the blog's
content cleaned and parsed with the BeautifulSoup html.parser method
"""
clean_text = None
soup = BeautifulSoup(html_content, "html.parser")
clean_text = soup.get_text()
return clean_text
def get_data_frame(page):
"""
Clean the data and generate a pandas dataframe with the values
Args:
page -- last page scraped
Return:
df -- pandas dataframe with all the data and sort by id
"""
logger.info("Downloading Dataset on {0}/{1}".format(
"koombeastaging.wpengine.com", "//wp-json/wp/v2/posts?page&per_page"))
data_temp, last_page = download_dataset(page)
logger.info(
"Begin To clean datablogs and grab title and content information")
# Clean html form of data blogs
blogs = []
for blog in tqdm(data_temp, desc="Cleaning html data"):
info_blog = {}
info_blog["id"] = blog["id"]
info_blog["title"] = clean_html(blog["title"]["rendered"])
info_blog["content"] = clean_html(blog["content"]["rendered"])
info_blog["slug"] = clean_html(blog["slug"])
blogs.append(info_blog)
# Transform to a simple dataframe
df =
|
pd.DataFrame(blogs)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 14:38:59 2020
This script plots the co-occurence rates
@author: acn980
"""
import calendar
import pandas as pd
from datetime import date
import matplotlib.pyplot as plt
import numpy as np
import os,sys,glob
sys.path.insert(0,r'E:\github\seasonality_risk\Functions')
from Functions_HCMC import detrend_fft, remove_NaN_skew, ax_joint_mm, extract_MM, joint_mm_all_cooc, plot_cooc_CI
np.set_printoptions(precision=15)
#%%
save = False
fn_trunk = 'E:/surfdrive/Documents'
fn = os.path.join(fn_trunk, 'Master2019\Thomas\data\matlab_csv')
fn_files = 'Master2019/Thomas/data'
fn2 = os.path.join(fn_trunk,fn_files)
lag_joint = 0 #days
#%% We import the monthly data AND the dates
fn_tide = os.path.join(fn,'Tide_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
date_parser = lambda x: pd.datetime.strptime(x, "%d-%m-%Y %H:%M:%S")
tide = pd.read_csv(fn_tide, parse_dates = True, date_parser= date_parser, index_col = 'Date')
tide.rename(columns = {tide.columns[0]:'tide'}, inplace = True)
all_tide = tide.resample('M').max()
tide_day = tide.resample('D').max()
# Importing monthly data - rainfall
allfiles = glob.glob(os.path.join(fn2, 'NewRain\TRENDS\MONTH_CORRECTED', 'Thiessen_*.csv'))
all_rain = pd.DataFrame(data=None)
for file in allfiles:
month = pd.read_csv(file, index_col = 'Year', parse_dates=True)
month.rename(columns={month.columns[0]:'Thiessen'}, inplace = True)
all_rain = pd.concat([all_rain, month], axis = 0)
#Importing the monthly data surge
allfiles = glob.glob(os.path.join(fn2, 'NewSurge\TRENDS\MONTH_RAW', 'skew_fft_*.csv'))
all_skew = pd.DataFrame(data=None)
for file in allfiles:
month = pd.read_csv(file, index_col = 'Year', parse_dates=True)
all_skew = pd.concat([all_skew, month], axis = 0)
fn_skew = os.path.join(fn,'skew_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
skew = pd.read_csv(fn_skew, parse_dates = True, date_parser= date_parser, index_col = 'Date')
skew.rename(columns = {skew.columns[0]:'skew'}, inplace = True)
skew = remove_NaN_skew(skew)
skew_day = skew.resample('D').max()
skew_detrend = detrend_fft(skew_day, fillnavalue=0, frequency = 1./(2*365), figure_plotting =0)
skew_detrend_day = skew_detrend.resample('D').max()
sealevel = pd.concat([tide_day, skew_detrend_day], axis = 1)
sealevel['sealevel'] = sealevel.iloc[:,0]+sealevel.iloc[:,1]
sealevel = pd.DataFrame(sealevel['sealevel'])
dates_MM_sl = extract_MM(sealevel, freq='MS', label='sealevel')
all_seasevel = dates_MM_sl.drop('sealevel_date', axis = 1)
fn_out_ori = os.path.join(fn2,'NewSurge','TRENDS','DATES_MONTH_RAW', "skew_fft.csv")
dates_MM_skew = pd.read_csv(fn_out_ori, index_col='index')
dates_MM_skew.dropna(inplace = True)
dates_MM_skew['skew_date'] = [pd.datetime.strptime(x, "%Y-%m-%d") for x in dates_MM_skew['skew_date']]
dates_MM_skew['index'] = [pd.to_datetime(date(d.year, d.month, calendar.monthrange(d.year, d.month)[-1])) for d in dates_MM_skew['skew_date']]
dates_MM_skew.set_index('index',inplace = True)
dates_MM_skew = pd.concat([dates_MM_skew, all_skew], axis = 1).drop('skew', axis=1).copy()
dates_MM_skew.rename(columns={'skew_fft':'skew'}, inplace = True)
fn_out_ori = os.path.join(fn2,'NewRain','TRENDS','DATES_MONTH_RAW',"Thiessen_sum.csv")
dates_MM_rain = pd.read_csv(fn_out_ori, index_col='index')
dates_MM_rain.dropna(inplace = True)
dates_MM_rain['Thiessen_sum_date'] = [
|
pd.datetime.strptime(x, "%Y-%m-%d")
|
pandas.datetime.strptime
|
# -*- coding: utf-8 -*-
"""Functionality that extends on what the base StatsCan api returns in some way
TODO
----
Function to delete tables
Extend getChangedCubeList with a function that returns all tables updated
within a date range
"""
import os
import json
import zipfile
import h5py
import pandas as pd
import numpy as np
import requests
from stats_can.scwds import get_series_info_from_vector
from stats_can.scwds import get_data_from_vectors_and_latest_n_periods
from stats_can.scwds import get_bulk_vector_data_by_range
from stats_can.scwds import get_cube_metadata
from stats_can.scwds import get_full_table_download
from stats_can.helpers import parse_tables
from stats_can.helpers import parse_vectors
def get_tables_for_vectors(vectors):
""" get a list of dicts mapping vectors to tables
Parameters
----------
vectors : list of str or str
Vectors to find tables for
Returns
-------
tables_list: list of dict
keys for each vector number return the table, plus a key for
'all_tables' that has a list of unique tables used by vectors
"""
v_json = get_series_info_from_vector(vectors)
vectors = [j["vectorId"] for j in v_json]
tables_list = {j["vectorId"]: str(j["productId"]) for j in v_json}
tables_list["all_tables"] = []
for vector in vectors:
if tables_list[vector] not in tables_list["all_tables"]:
tables_list["all_tables"].append(tables_list[vector])
return tables_list
def table_subsets_from_vectors(vectors):
"""get a list of dicts mapping tables to vectors
Parameters
----------
vectors : list of str or str
Vectors to find tables for
Returns
-------
tables_dict: list of dict
keys for each table used by the vectors, matched to a list of vectors
"""
start_tables_dict = get_tables_for_vectors(vectors)
tables_dict = {t: [] for t in start_tables_dict["all_tables"]}
vecs = list(start_tables_dict.keys())[:-1] # all but the all_tables key
for vec in vecs:
tables_dict[start_tables_dict[vec]].append(vec)
return tables_dict
def download_tables(tables, path=None, csv=True):
"""Download a json file and zip of data for a list of tables to path
Parameters
----------
tables: list of str
tables to be downloaded
path: str, default: None (will do current directory)
Where to download the table and json
csv: boolean, default True
download in CSV format, if not download SDMX
Returns
-------
downloaded: list
list of tables that were downloaded
"""
metas = get_cube_metadata(tables)
for meta in metas:
product_id = meta["productId"]
zip_url = get_full_table_download(product_id, csv=csv)
if csv:
zip_file = product_id + "-eng.zip"
else:
zip_file = product_id + ".zip"
json_file = product_id + ".json"
if path:
zip_file = os.path.join(path, zip_file)
json_file = os.path.join(path, json_file)
# Thanks http://evanhahn.com/python-requests-library-useragent/
response = requests.get(zip_url, stream=True, headers={"user-agent": None})
# Thanks https://bit.ly/2sPYPYw
with open(json_file, "w") as outfile:
json.dump(meta, outfile)
with open(zip_file, "wb") as handle:
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
downloaded = [meta["productId"] for meta in metas]
return downloaded
def zip_update_tables(path=None, csv=True):
"""check local json, update zips of outdated tables
Grabs the json files in path, checks them against the metadata on
StatsCan and grabs updated tables where there have been changes
There isn't actually a "last modified date" part to the metadata
What I'm doing is comparing the latest reference period. Almost all
data changes will at least include incremental releases, so this should
capture what I want
Parameters
----------
path: str, default: None
where to look for tables to update
csv: boolean, default: True
Downloads updates in CSV form by default, SDMX if false
Returns
-------
update_table_list: list
list of the tables that were updated
"""
local_jsons = list_zipped_tables(path=path)
tables = [j["productId"] for j in local_jsons]
remote_jsons = get_cube_metadata(tables)
update_table_list = []
for local, remote in zip(local_jsons, remote_jsons):
if local["cubeEndDate"] != remote["cubeEndDate"]:
update_table_list.append(local["productId"])
download_tables(update_table_list, path, csv=csv)
return update_table_list
def zip_table_to_dataframe(table, path=None):
"""Reads a StatsCan table into a pandas DataFrame
If a zip file of the table does not exist in path, downloads it
Parameters
----------
table: str
the table to load to dataframe from zipped csv
path: str, default: current working directory when module is loaded
where to download the tables or load them
Returns:
df: pandas.DataFrame
the table as a dataframe
"""
# Parse tables returns a list, can only do one table at a time here though
table = parse_tables(table)[0]
table_zip = table + "-eng.zip"
if path:
table_zip = os.path.join(path, table_zip)
if not os.path.isfile(table_zip):
download_tables([table], path)
csv_file = table + ".csv"
with zipfile.ZipFile(table_zip) as myzip:
with myzip.open(csv_file) as myfile:
col_names = pd.read_csv(myfile, nrows=0).columns
# reopen the file or it misses the first row
with myzip.open(csv_file) as myfile:
types_dict = {"VALUE": float}
types_dict.update({col: str for col in col_names if col not in types_dict})
df = pd.read_csv(myfile, dtype=types_dict)
possible_cats = [
"GEO",
"DGUID",
"STATUS",
"SYMBOL",
"TERMINATED",
"DECIMALS",
"UOM",
"UOM_ID",
"SCALAR_FACTOR",
"SCALAR_ID",
"VECTOR",
"COORDINATE",
"Wages",
"National Occupational Classification for Statistics (NOC-S)",
"Supplementary unemployment rates",
"Sex",
"Age group",
"Labour force characteristics",
"Statistics",
"Data type",
"Job permanency",
"Union coverage",
"Educational attainment",
]
actual_cats = [col for col in possible_cats if col in col_names]
df[actual_cats] = df[actual_cats].astype("category")
try:
df["REF_DATE"] = pd.to_datetime(df["REF_DATE"], format="%Y-%m")
except TypeError:
df["REF_DATE"] = pd.to_datetime(df["REF_DATE"])
return df
def list_zipped_tables(path=None):
"""List StatsCan tables available
defaults to looking in the current working directory and for zipped CSVs
Parameters
----------
path: string or path, default None
Where to look for zipped tables
Returns
-------
tables: list
list of available tables json data
"""
# Find json files
jsons = [f for f in os.listdir(path) if f.endswith(".json")]
if path:
jsons = [os.path.join(path, j) for j in jsons]
tables = []
for j in jsons:
try:
with open(j) as json_file:
result = json.load(json_file)
if "productId" in result:
tables.append(result)
except ValueError as e:
print("failed to read json file" + j)
print(e)
return tables
def tables_to_h5(tables, h5file="stats_can.h5", path=None):
"""Take a table and its metadata and put it in an hdf5 file
Parameters
----------
tables: list of str
tables to add to the h5file
h5file: str, default stats_can.h5
name of the h5file to store the tables in
path: str or path, default = current working directory
path to the h5file
Returns
-------
tables: list
list of tables loaded into the file
"""
if path:
h5file = os.path.join(path, h5file)
tables = parse_tables(tables)
for table in tables:
hkey = "table_" + table
jkey = "json_" + table
zip_file = table + "-eng.zip"
json_file = table + ".json"
if path:
zip_file = os.path.join(path, zip_file)
json_file = os.path.join(path, json_file)
if not os.path.isfile(json_file):
download_tables([table], path)
df = zip_table_to_dataframe(table, path=path)
with open(json_file) as f_name:
df_json = json.load(f_name)
with
|
pd.HDFStore(h5file, "a")
|
pandas.HDFStore
|
import calcIsotopologues as ci
import fragmentAndSimulate as fas
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
'''
A set of functions for visualizing predicted spectra. These are useful when correlating observations with experimental data.
'''
def fullSpectrumVis(molecularDataFrame, byAtom, figsize = (10,4), lowAbundanceCutOff = 0, massError = 0,
xlim =(), ylim = ()):
'''
Visualizes the full spectrum (i.e. without fragmentation) based on the abundances of all isotopologues.
Inputs:
molecularDataFrame: A dataframe containing basic information about the molecule.
byAtom: A dictionary containing the isotopolouges of the molecule and their abundances. See calcIsotopologues for details.
figsize: The output figure size.
lowAbundanceCutOff: Do not show peaks below this relative abundance.
massError: In amu, shifts all observed peaks by this amount.
xlim: Set an xlim for the plot, as (xlow, xhigh)
ylim: as xlim.
Outputs:
None. Displays plot.
'''
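# Illustrative call (cutoff and axis limits are placeholders):
# fullSpectrumVis(molecularDataFrame, byAtom, lowAbundanceCutOff=0.001, xlim=(43.9, 44.2))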
selectedIsotopologues = byAtom
siteElements = ci.strSiteElements(molecularDataFrame)
predictSpectrum = {}
#calculates the mass of each isotopologue as well as its substitution. Adds its absolute concentration to the predicted
#spectrum
for key, item in tqdm(selectedIsotopologues.items()):
mass = fas.computeMass(key, siteElements)
correctedMass = mass + massError
subs = fas.computeSubs(key, siteElements)
if correctedMass not in predictSpectrum:
predictSpectrum[correctedMass] = {'Abs. Abundance':0}
if 'Sub' not in predictSpectrum[correctedMass]:
predictSpectrum[correctedMass]['Sub'] = subs
predictSpectrum[correctedMass]['Abs. Abundance'] += item['Conc']
#Calculates the relative abundances, and places these, the masses, and substitutions into lists to plot.
totalAbundance = 0
for key, item in predictSpectrum.items():
totalAbundance += item['Abs. Abundance']
massPlot = []
relAbundPlot = []
subPlot = []
for key, item in predictSpectrum.items():
item['Rel. Abundance'] = item['Abs. Abundance'] / totalAbundance
massPlot.append(key)
relAbundPlot.append(item['Rel. Abundance'])
subPlot.append(item['Sub'])
#Constructs a figure; does not plot peaks below the relative abundance cut off.
fig, ax = plt.subplots(figsize = figsize)
massPlotcutOff = []
subPlotcutOff = []
for i in range(len(massPlot)):
if relAbundPlot[i] > lowAbundanceCutOff:
ax.vlines(massPlot[i], 0, relAbundPlot[i])
massPlotcutOff.append(massPlot[i])
subPlotcutOff.append(subPlot[i])
ax.set_xticks(massPlotcutOff)
labels = [str(round(x,5)) +'\n' + y for x,y in zip(massPlotcutOff,subPlotcutOff)]
ax.set_xticklabels(labels,rotation = 45);
if xlim != ():
ax.set_xlim(xlim[0],xlim[1]);
if ylim != ():
ax.set_ylim(ylim[0],ylim[1]);
ax.set_ylabel("Relative Abundance")
plt.show()
def MNSpectrumVis(molecularDataFrame, fragKey, predictedMeasurement, MNKey, MNDict, lowAbundanceCutOff = 0,
massError = 0, xlim = (), ylim = ()):
'''
Visualizes the fragmented spectrum of an M+N experiment based on the abundances of fragment peaks.
Inputs:
molecularDataFrame: A dataframe containing basic information about the molecule.
fragKey: A string identifying the fragment, e.g. '133', '44'.
predictedMeasurement: A dictionary giving information about the abundance of isotopic peaks in the fragment. See fragmentAndSimulate.predictMNFragmentExpt
MNKey: A string identifying the mass selection to visualize; e.g. 'M1', 'M2'. This population of isotopologues is selected prior to fragmentation.
MNDict: A dictionary; the keys are MNKeys ("M1", "M2") and the values are dictionaries containing the isotopologues present in each mass selection. See calcIsotopologues.massSelections
lowAbundanceCutOff: Do not show peaks below this relative abundance.
massError: In amu, shifts all observed peaks by this amount.
xlim: Set an xlim for the plot, as (xlow, xhigh)
ylim: as xlim.
Outputs:
None. Displays plot.
'''
toShow = predictedMeasurement[MNKey][fragKey]
siteElements = ci.strSiteElements(molecularDataFrame)
massPlot = []
relAbundPlot = []
subPlot = []
for subKey, observation in toShow.items():
#This section (until correctedMass) is an elaborate routine to get the mass of the isotopologues with that
#substitution. It may break in weird circumstances. We should try to improve this.
Isotopologues =
|
pd.DataFrame.from_dict(MNDict[MNKey])
|
pandas.DataFrame.from_dict
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 07_gbdt_local_feature_importance.ipynb (unless otherwise specified).
__all__ = ['XGBoostLFI', 'LocalFeatureImportance']
# Cell
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from collections import defaultdict
# Cell
class XGBoostLFI:
"Wrapper around for `XGBoost` models"
def __init__(self, model):
self.model = model
def get_tree(self, trees, tree_index):
"Return tree for a specific index."
mask = trees.Tree == tree_index
return trees.loc[mask]
def get_booster(self):
"Returns booster."
return self.model.get_booster()
def get_num_trees(self, trees_df):
"Returns number of number of estimators."
return trees_df.Tree.nunique()
def get_node(self, tree, node_id):
"Returns a particular node in a tree."
mask = tree.ID == node_id
return tree.loc[mask]
def get_node_index(self, node, branch):
"Returns index of a particular node in a tree."
if branch == 'left': return node['Yes'].values[0]
else: return node['No'].values[0]
def get_node_id(self, node):
"Returns id of a particular node in a tree. It is different from a node-index."
return node['ID'].values[0]
def next_node(self, tree, curr_node, branch):
"Returns next node from a current node based on which sub-branch one wants to navigate."
if branch == 'left': return self.get_node(tree, self.get_node_index(curr_node, branch))
else: return self.get_node(tree, self.get_node_index(curr_node, branch))
def get_split(self, node):
"Returns split value"
return node['Split'].values[0]
def get_feature(self, node):
"Returns feature that was used to make the split."
return node['Feature'].values[0]
def node_score(self, node):
"Returns gain for a particular decision node."
return node['Gain'].values[0]
def node_parent_score(self, node):
"Returns parent score for a particular node"
return node['Parent_Score'].values[0]
def node_cover(self, node):
"Returns how many samples are there in a terminal node."
return node['Cover'].values[0]
def average(self, left_node, right_node):
"Returns average of scores of children of a node."
return (left_node['score'] + right_node['score']) / 2
def weighted_sum(self, left_node, right_node):
"Returns weighted average of children of a node."
return ((left_node['cover'] * left_node['score'] + right_node['cover'] * right_node['score'])) /\
(left_node['cover'] + right_node['cover'])
def propagation_strategy(self, left_node, right_node, strategy='average'):
"Returns parent score from left and right children. It is based on propagation strategy."
if strategy == 'average': return self.average(left_node, right_node)
else: return self.weighted_sum(left_node, right_node)
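# Worked example (hypothetical nodes): left = {'score': 2, 'cover': 3}, right = {'score': 4, 'cover': 1}
# 'average' gives (2 + 4) / 2 = 3.0, while 'weighted_sum' gives (3*2 + 1*4) / (3 + 1) = 2.5.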
# Cell
class LocalFeatureImportance:
"Calculates `Feature Importance` and provides explanability. It implements (http://www.cs.sjtu.edu.cn/~kzhu/papers/kzhu-infocode.pdf)"
def __init__(self, model, strategy='average'):
self.strategy = strategy
model_category = self.get_model_category(self.get_model_type(model))
if model_category == 'xgboost':
self.model = XGBoostLFI(model)
def trees_to_df(self):
"Convert internal tree reprensentation to a Pandas DataFrame"
if self.get_model_type(self.model) == xgb.core.Booster: return self.model.trees_to_dataframe()
else: return self.model.get_booster().trees_to_dataframe()
def get_model_type(self, model):
return type(model)
def get_model_category(self, model_type):
if model_type in [xgb.core.Booster, xgb.sklearn.XGBRegressor, xgb.sklearn.XGBClassifier]:
return 'xgboost'
else:
raise ValueError(f'{model_type} is not supported.')
def make_node(self, node):
"Create dict representation of a node which contains score and cover based on strategy."
if self.strategy == 'average': return {'score': self.model.node_parent_score(node)}
else: return {'score': self.model.node_parent_score(node), 'cover': self.model.node_cover(node)}
def parse(self, tree, node_id):
"Calculates and assigns scores for a particular node in the tree."
current_node = self.model.get_node(tree, node_id)
if self.model.get_feature(current_node) == 'Leaf': return self.model.node_score(current_node)
left_child = self.model.next_node(tree, current_node, branch='left')
right_child = self.model.next_node(tree, current_node, branch='right')
left_branch_score = self.parse(tree, self.model.get_node_id(left_child))
tree.loc[tree.ID == left_child.ID.values[0], 'Parent_Score'] = left_branch_score
right_branch_score = self.parse(tree, self.model.get_node_id(right_child))
tree.loc[tree.ID == right_child.ID.values[0], 'Parent_Score'] = right_branch_score
root_score = self.model.propagation_strategy(self.make_node(self.model.get_node(tree,
self.model.get_node_index(current_node, branch='left')
)),
self.make_node(self.model.get_node(tree,
self.model.get_node_index(current_node, branch='right')
)))
tree.loc[tree.ID == current_node.ID.values[0], 'Parent_Score'] = root_score
return root_score
def propagate_scores(self):
"Parse and calculates scores for all nodes for all trees."
trees_df = self.trees_to_df()
parsed_trees = []
num_trees = self.model.get_num_trees(trees_df)
for tree_index in range(num_trees):
tree = self.model.get_tree(trees_df, tree_index)
self.parse(tree, f'{tree_index}-0')
parsed_trees.append(tree)
return pd.concat(parsed_trees, axis=0)
def split_decision(self, feature, split_value):
"How to decide whether to go left or right in a tree."
if
|
pd.isnull(feature)
|
pandas.isnull
|
"""This module contains all functions related to stocks"""
#pylint: disable = trailing-whitespace,line-too-long, too-many-lines, no-name-in-module, multiple-imports, pointless-string-statement, too-many-arguments
import datetime
import io
import json
import random
from enum import Enum
import os
import requests
import typing
import pandas
import numpy
from .exceptions import (
InvalidBrokerage,
InvalidStockExchange,
BadRequest,
InvalidCredentials,
)
from .logs import start_logger
"""Config starts"""
USER_ACCESS_TOKEN = os.environ['USER_ACCESS_TOKEN']
USER_ACCOUNT_NUMBER = os.environ['USER_ACCOUNT_NUMBER']
USER_BROKERAGE = os.environ['USER_BROKERAGE']
TR_STREAMING_API_URL = "https://stream.tradier.com"
TR_BROKERAGE_API_URL = "https://production-api.tradier.com"
TR_SANDBOX_BROKERAGE_API_URL = "https://production-sandbox.tradier.com"
logger = start_logger(__name__)
def tr_get_headers(access_token: str) -> dict:
'''headers for TR brokerage'''
headers = {
"Accept": "application/json",
"Authorization": "Bearer " + access_token
}
return headers
def tr_get_content_headers() -> dict:
'''content headers for TR brokerage'''
headers = {
'Accept': 'application/json',
}
return headers
"""Config ends"""
"""Data APIs Start"""
def latest_price_info(symbols: str,
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
dataframe: bool = False) -> dict:
"""Get latest price information for an individual or multiple symbols"""
if brokerage == "Tradier Inc.":
url = TR_BROKERAGE_API_URL
elif brokerage == "miscpaper":
url = TR_SANDBOX_BROKERAGE_API_URL
else:
raise InvalidBrokerage
response = requests.get(
"{}/v1/markets/quotes?symbols={}".format(url, str(symbols.upper())),
headers=tr_get_headers(access_token),
)
if response:
if 'quote' not in response.json()['quotes']:
return response.json()
data = response.json()['quotes']['quote']
if isinstance(data, list):
for i in data:
i['trade_date'] = datetime.datetime.fromtimestamp(
float(i['trade_date']) /
1000.0).strftime("%Y-%m-%d %H:%M:%S")
i['bid_date'] = datetime.datetime.fromtimestamp(
float(i['bid_date']) /
1000.0).strftime("%Y-%m-%d %H:%M:%S")
i['ask_date'] = datetime.datetime.fromtimestamp(
float(i['ask_date']) /
1000.0).strftime("%Y-%m-%d %H:%M:%S")
else:
data['trade_date'] = datetime.datetime.fromtimestamp(
float(data['trade_date']) /
1000.0).strftime("%Y-%m-%d %H:%M:%S")
data['bid_date'] = datetime.datetime.fromtimestamp(
float(data['bid_date']) / 1000.0).strftime("%Y-%m-%d %H:%M:%S")
data['ask_date'] = datetime.datetime.fromtimestamp(
float(data['ask_date']) / 1000.0).strftime("%Y-%m-%d %H:%M:%S")
if not dataframe:
return data
else:
if isinstance(data, list):
return pandas.DataFrame(data)
else:
return pandas.DataFrame([data])
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
def create_session(brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN) -> str:
"""Create a live session to receive sessionid which is needed for streaming live quotes and trades"""
if brokerage == "miscpaper":
access_token = os.environ["KT_ACCESS_TOKEN"]
elif brokerage == "Tradier Inc.":
access_token = access_token
else:
raise InvalidBrokerage
response = requests.post(
"{}/v1/markets/events/session".format(TR_BROKERAGE_API_URL),
headers=tr_get_headers(access_token),
)
if response:
stream = response.json()["stream"]
sessionid = str(stream["sessionid"])
return sessionid
if response.status_code == 400:
raise BadRequest(response.text)
if response.status_code == 401:
raise InvalidCredentials(response.text)
def latest_quote(
symbols: str,
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
) -> dict:
"""Get live quotes direct from various exchanges"""
# for flagging quotes:
# https://docs.dxfeed.com/misc/dxFeed_TimeAndSale_Sale_Conditions.htm
if brokerage == "miscpaper":
access_token = os.environ["KT_ACCESS_TOKEN"]
elif brokerage == "Tradier Inc.":
access_token = access_token
else:
raise InvalidBrokerage
payload = {
"sessionid": create_session(brokerage=brokerage,
access_token=access_token),
"symbols": symbols,
"filter": 'quote',
"linebreak": True,
}
response = requests.post(
"{}/v1/markets/events".format(TR_STREAMING_API_URL),
params=payload,
headers=tr_get_headers(access_token),
stream=True,
)
if response:
for data in response.iter_content(chunk_size=None,
decode_unicode=True):
lines = data.decode("utf-8").replace("}{", "}\n{").split("\n")
for line in lines:
_quotes = json.loads(line)
converted_biddata = float(_quotes["biddate"]) / 1000.0
converted_askdate = float(_quotes["askdate"]) / 1000.0
_quotes["biddate"] = datetime.datetime.fromtimestamp(
converted_biddata).strftime("%Y-%m-%d %H:%M:%S")
_quotes["askdate"] = datetime.datetime.fromtimestamp(
converted_askdate).strftime("%Y-%m-%d %H:%M:%S")
return _quotes
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
if response.text == "Session not found":
create_session(brokerage=brokerage, access_token=access_token)
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
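# Hedged usage sketch (not part of the original module): pulling a single live
# quote. The symbol "AAPL" is an illustrative assumption, and the payload keys
# are read defensively since only "biddate"/"askdate" are visible above.
def _example_latest_quote():  # hypothetical helper, illustration only
    quote = latest_quote("AAPL", brokerage="miscpaper")
    print(quote.get("bid"), quote.get("ask"), quote.get("biddate"))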
def latest_trade(
symbols: str,
filter: str = "trade",
valid_only: bool = True,
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
) -> dict:
"""Get live trades direct from various exchanges"""
# for flagging trades:
# https://docs.dxfeed.com/misc/dxFeed_TimeAndSale_Sale_Conditions.htm
if brokerage == "miscpaper":
access_token = os.environ["KT_ACCESS_TOKEN"]
elif brokerage == "Tradier Inc.":
pass
else:
raise InvalidBrokerage
    sessionid: str = create_session(brokerage=brokerage,
                                    access_token=access_token)
payload = {
"sessionid": sessionid,
"symbols": symbols,
"filter": filter,
"linebreak": True,
"validOnly": valid_only,
}
response = requests.post(
"{}/v1/markets/events".format(TR_STREAMING_API_URL),
params=payload,
headers=tr_get_headers(access_token),
stream=True,
)
if response:
for data in response.iter_content(chunk_size=None,
decode_unicode=True):
            if isinstance(data, bytes):
                data = data.decode("utf-8")
            lines = data.replace("}{", "}\n{").split("\n")
for line in lines:
trades = json.loads(line)
converted_date = float(trades["date"]) / 1000.0
trades["date"] = datetime.datetime.fromtimestamp(
converted_date).strftime("%Y-%m-%d %H:%M:%S")
return trades
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
if response.text == "Session not found":
create_session(brokerage=brokerage, access_token=access_token)
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
def intraday_summary(
symbols: str,
filter: str = "summary",
valid_only: bool = True,
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
) -> dict:
"""Get live summary"""
# for flagging trades:
# https://docs.dxfeed.com/misc/dxFeed_TimeAndSale_Sale_Conditions.htm
if brokerage == "miscpaper":
access_token = os.environ["KT_ACCESS_TOKEN"]
elif brokerage == "Tradier Inc.":
pass
else:
raise InvalidBrokerage
    sessionid: str = create_session(brokerage=brokerage,
                                    access_token=access_token)
payload = {
"sessionid": sessionid,
"symbols": symbols,
"filter": filter,
"linebreak": True,
"validOnly": valid_only,
}
response = requests.post(
"{}/v1/markets/events".format(TR_STREAMING_API_URL),
params=payload,
headers=tr_get_headers(access_token),
stream=True,
)
if response:
for data in response.iter_content(chunk_size=None,
decode_unicode=True):
            if isinstance(data, bytes):
                data = data.decode("utf-8")
            lines = data.replace("}{", "}\n{").split("\n")
for line in lines:
summary = json.loads(line)
return summary
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
if response.text == "Session not found":
create_session(brokerage=brokerage, access_token=access_token)
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
def tick_data(symbol: str,
start: typing.Any = None,
end: typing.Any = None,
data_filter: str = "open",
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
dataframe: bool = True) -> dict:
"""Get historical tick data(trades placed) for a particular period of time.
Data available for 5 days in the past."""
if brokerage == "Tradier Inc.":
url = TR_BROKERAGE_API_URL
elif brokerage == "miscpaper":
url = TR_SANDBOX_BROKERAGE_API_URL
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidBrokerage
params = {
"symbol": str.upper(symbol),
"start": start,
"end": end,
"session_filter": 'open',
}
response = requests.get(
"{}/v1/markets/timesales".format(url),
headers=tr_get_headers(access_token),
params=params,
stream=True,
)
try:
if response:
if dataframe:
if 'data' in response.json()["series"]:
data = response.json()["series"]["data"]
dataframe = pandas.DataFrame(data)
dataframe['datetime'] = pandas.to_datetime(
dataframe['time'])
dataframe.set_index(['datetime'], inplace=True)
del dataframe['time']
del dataframe['timestamp']
return dataframe
return response.json()
return response.json()
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
except Exception as exception:
logger.error('Oops! An error Occurred ⚠️')
raise exception
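# Hedged usage sketch (not part of the original module): fetching recent ticks
# into a DataFrame. The symbol and the "YYYY-MM-DD HH:MM" start/end strings are
# assumptions about what the timesales endpoint accepts, not confirmed here.
def _example_tick_data():  # hypothetical helper, illustration only
    ticks = tick_data("AAPL",
                      start="2019-05-06 09:30",
                      end="2019-05-06 10:00",
                      brokerage="miscpaper")
    print(ticks.head() if hasattr(ticks, "head") else ticks)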
def min1_bar_data(symbol: str,
start: typing.Any = None,
end: typing.Any = None,
data_filter: str = "all",
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
dataframe: bool = True) -> dict:
"""Not in docs. Used in ohlcv(). Get historical bar data with 1 minute interval for a given period of time.
    Goes up to 20 days with data points during open market hours. Goes up to 10 days with all data points."""
if brokerage == "Tradier Inc.":
url = TR_BROKERAGE_API_URL
elif brokerage == "miscpaper":
url = TR_SANDBOX_BROKERAGE_API_URL
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidBrokerage
params = {
"symbol": str.upper(symbol),
"interval": "1min",
"start": start,
"end": end,
"session_filter": str(data_filter),
}
response = requests.get(
"{}/v1/markets/timesales".format(url),
headers=tr_get_headers(access_token),
params=params,
stream=True,
)
if response:
if not dataframe:
return response.json()
else:
data = response.json()["series"]["data"]
dataframe = pandas.DataFrame(data)
dataframe['datetime'] = pandas.to_datetime(dataframe['time'])
dataframe.set_index(['datetime'], inplace=True)
del dataframe['time']
del dataframe['timestamp']
return dataframe
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
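# Hedged sketch (not part of the original module): the docstring above mentions
# ohlcv(); one way such a helper could downsample the 1-minute bars with pandas.
# The column names ('open', 'high', 'low', 'close', 'volume') are assumptions
# about the timesales payload rather than something confirmed in this file.
def _example_resample_min1_to_5min(bars):  # hypothetical helper
    # 'bars' is the DataFrame returned by min1_bar_data(..., dataframe=True)
    return bars.resample("5min").agg({"open": "first",
                                      "high": "max",
                                      "low": "min",
                                      "close": "last",
                                      "volume": "sum"})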
def min5_bar_data(
symbol: str,
start: typing.Any = None,
end: typing.Any = None,
data_filter: str = "all",
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
dataframe: bool = True,
) -> dict:
"""Not in docs. Used in ohlcv(). Get historical bar data with 5 minute interval for a given period of time.
    Goes up to 40 days with data points during open market hours. Goes up to 18 days with all data points."""
if brokerage == "Tradier Inc.":
url = TR_BROKERAGE_API_URL
elif brokerage == "miscpaper":
url = TR_SANDBOX_BROKERAGE_API_URL
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidBrokerage
params = {
"symbol": str.upper(symbol),
"interval": "5min",
"start": start,
"end": end,
"session_filter": str(data_filter),
}
response = requests.get(
"{}/v1/markets/timesales".format(url),
headers=tr_get_headers(access_token),
params=params,
)
if response:
if not dataframe:
return response.json()
else:
data = response.json()["series"]["data"]
dataframe = pandas.DataFrame(data)
dataframe['datetime'] = pandas.to_datetime(dataframe['time'])
dataframe.set_index(['datetime'], inplace=True)
del dataframe['time']
del dataframe['timestamp']
return dataframe
if response.status_code == 400:
logger.error('Oops! An error Occurred ⚠️')
raise BadRequest(response.text)
if response.status_code == 401:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidCredentials(response.text)
def min15_bar_data(symbol: str,
start: typing.Any = None,
end: str = None,
data_filter: str = "all",
brokerage: typing.Any = USER_BROKERAGE,
access_token: str = USER_ACCESS_TOKEN,
dataframe: bool = True) -> dict:
"""Not in docs. Used in ohlcv(). Get historical bar data with 15 minute interval for a given period of time.
    Goes up to 40 days with data points during open market hours. Goes up to 18 days with all data points."""
if brokerage == "Tradier Inc.":
url = TR_BROKERAGE_API_URL
elif brokerage == "miscpaper":
url = TR_SANDBOX_BROKERAGE_API_URL
else:
logger.error('Oops! An error Occurred ⚠️')
raise InvalidBrokerage
params = {
"symbol": str.upper(symbol),
"interval": "15min",
"start": start,
"end": end,
"session_filter": str(data_filter),
}
response = requests.get(
"{}/v1/markets/timesales".format(url),
headers=tr_get_headers(access_token),
params=params,
)
if response:
if not dataframe:
return response.json()
else:
data = response.json()["series"]["data"]
dataframe =
|
pandas.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import pytest
from kgextension.endpoints import DBpedia
from kgextension.schema_matching import (
relational_matching,
label_schema_matching,
value_overlap_matching,
string_similarity_matching
)
class TestRelationalMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test5_match_score(self):
score = 0.76
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
expected_matches['value'] = np.where(
expected_matches['value']==1, score, expected_matches['value'])
output_matches = relational_matching(df, match_score=score)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test6_one_endpoint(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, endpoints=DBpedia)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test7_no_http_input(self):
df = pd.DataFrame({'a': [1, 2, 3],
'b': [4, 5, 6]})
expected_matches = pd.DataFrame(columns=["uri_1", "uri_2", "value"])
output_matches = relational_matching(df)
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
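# Hedged sketch (added, not part of the original tests): test7 above implies
# relational_matching() returns an empty frame with columns uri_1 / uri_2 /
# value when no http(s) link columns are present; a minimal schema check could
# look like this (the helper name is an illustrative assumption).
def _example_relational_matching_schema():
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    matches = relational_matching(df)
    assert set(matches.columns) == {"uri_1", "uri_2", "value"}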
class TestStringSimilarityMatching():
def test1_default(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t1.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, prefix_threshold=1)
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
def test2_highthreshold(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t2.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, prefix_threshold=10)
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
def test3_diffpredicate_diffmetric(self):
path_input = "test/data/schema_matching/string_matching_input_t3.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t3.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, predicate="dbo:abstract", to_lowercase=False, remove_prefixes=False, remove_punctuation=False, similarity_metric="token_set_levenshtein")
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
class TestLabelSchemaMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df, uri_data_model=True)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df, uri_data_model=True)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
class TestValueOverlapMatching:
def test1_boolean_data(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/value_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = value_overlap_matching(df)
output_matches['value_overlap'] = pd.to_numeric(output_matches['value_overlap'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches_boolean_data(self):
df = pd.DataFrame({
'city' : [1, 1, 0, 1],
'entity' : ['Bremen', 'Hamburg', 'Denmark', 'Berlin'],
'new_link_1': ['http://dbpedia.org/resource/Bremen', 'http://dbpedia.org/resource/Hamburg', 'http://dbpedia.org/resource/Denmark', 'http://dbpedia.org/resource/Berlin'],
'new_link_in_boolean_http://dbpedia.org/resource/Category:German_state_capitals': [True, True, False, True],
'new_link_in_boolean_http://dbpedia.org/resource/Category:Countries_in_Europe': [False, False, True, False]
})
expected_result_df = pd.DataFrame({
'uri_1' : ['new_link_in_boolean_http://dbpedia.org/resource/Category:Countries_in_Europe'],
'uri_2' : ['http://dbpedia.org/resource/Category:German_state_capitals'],
'value_overlap': [0.0]
})
result = value_overlap_matching(df)
pd.testing.assert_frame_equal(
result, expected_result_df, check_like=True)
def test3_numeric_data(self):
path_input = "test/data/schema_matching/value_matches_cities_numeric_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/value_matches_cities_numeric_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = value_overlap_matching(df)
output_matches['value_overlap'] = pd.to_numeric(output_matches['value_overlap'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_no_matches_numeric_data(self):
df = pd.DataFrame({
'city' : [1, 1, 0, 1],
'entity' : ['Bremen', 'Hamburg', 'Denmark', 'Berlin'],
'new_link_1': ['http://dbpedia.org/resource/Bremen', 'http://dbpedia.org/resource/Hamburg', 'http://dbpedia.org/resource/Denmark', 'http://dbpedia.org/resource/Berlin'],
'Link_Out_numeric_http://dbpedia.org/ontology/PopulatedPlace/areaMetro': [1, 0, 0, 0],
'Link_Out_numeric_http://dbpedia.org/ontology/abstract': [12, 12, 11, 12]
})
expected_result_df = pd.DataFrame({
'uri_1' : ['http://dbpedia.org/ontology/PopulatedPlace/areaMetro'],
'uri_2' : ['http://dbpedia.org/ontology/abstract'],
'value_overlap': [0.0]
})
result = value_overlap_matching(df)
pd.testing.assert_frame_equal(
result, expected_result_df, check_like=True)
class TestSimilarityOfPairs:
def test1_smallset(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df =
|
pd.read_csv(path_input)
|
pandas.read_csv
|
#------------------------------#
# working with large amounts #
# of data. #
#------------------------------#
import pandas as pd
df =
|
pd.read_csv('C:/src/learn-pandas/pandas code/pokemon_data.csv')
|
pandas.read_csv
|
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pplkit.data.interface import DataInterface
tmpdir = Path(__file__).parents[1] / "tmp"
@pytest.fixture
def data():
return {"a": [1, 2, 3], "b": [4, 5, 6]}
@pytest.fixture(scope="class", autouse=True)
def rm_tmpdir_after_tests():
yield
shutil.rmtree(tmpdir)
@pytest.mark.parametrize(
"fextn", [".json", ".yaml", ".pkl", ".csv", ".parquet"]
)
def test_data_interface(data, fextn):
dataif = DataInterface(tmp=tmpdir)
if fextn in [".csv", ".parquet"]:
data =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# Set up visualization
import time
from IPython import display
import pandas as pd
from gretel_client.transformers import DataTransformPipeline
def display_df(
df: pd.DataFrame, sleep: float, clear: bool, title: str, title_color: str
):
style = df.style.apply(highlight_tags, cols=["tags"])
if title:
style = style.set_caption(title).set_table_styles(
[
{
"selector": "caption",
"props": [("color", title_color), ("font-size", "14px")],
}
]
)
if clear:
display.clear_output(wait=True)
display.display(style)
time.sleep(sleep)
def highlight_tags(s, cols) -> list:
""" Style the discovered entities
Params:
s : series
cols : list, list of columns to style
"""
color_map = ["#47E0B3", "#F98043", "#50D8F1", "#C18DFC"]
return [
"background-color: {}".format(color_map[ord(str(x)[-1]) % len(color_map) - 1])
if len(str(x)) > 0 and s.name in cols
else ""
for x in s
]
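# Note (added, hedged): the index above, ord(str(x)[-1]) % len(color_map) - 1,
# maps the last character of each cell to one of the four colours; a remainder
# of 0 wraps around to the last colour via Python's negative indexing.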
def stream_table_view(
data: dict,
xf: DataTransformPipeline = None,
sleep: float = 0.0,
title: str = None,
title_color: str = "black",
clear: bool = False,
):
"""
Stream a table view into a Jupyter cell
"""
if xf:
transformed = xf.transform_record(data)
df = pd.DataFrame.from_dict(
transformed["record"], orient="index", columns=["field"]
)
df["tags"] = ""
for field, value in transformed["record"].items():
if field in data["record"].keys():
if value != data["record"][field]:
df.at[field, "tags"] = "Transformed"
else:
field_data = data["metadata"]["fields"].get(
str(field), {"ner": {"labels": []}}
)
labels = ", ".join(
[x["label"] for x in field_data["ner"]["labels"]]
)
df.at[field, "tags"] = labels
else:
df.at[field, "tags"] = "Transformed"
else:
# Gretel format record +
df = pd.DataFrame.from_dict(data["record"], orient="index", columns=["field"])
df["tags"] = ""
for field in list(df.index):
field_data = data["metadata"]["fields"].get(
str(field), {"ner": {"labels": []}}
)
labels = ", ".join([x["label"] for x in field_data["ner"]["labels"]])
df.at[field, "tags"] = labels
display_df(df, sleep, clear, title, title_color)
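# Hedged usage sketch (not part of the original module): the minimal shape of a
# Gretel-format record that stream_table_view() expects, inferred from the key
# accesses above. Field names and labels are illustrative assumptions.
def _example_stream_table_view():  # hypothetical helper, illustration only
    record = {
        "record": {"name": "Jane Doe", "city": "Berlin"},
        "metadata": {
            "fields": {
                "name": {"ner": {"labels": [{"label": "person_name"}]}},
            }
        },
    }
    stream_table_view(record, title="Example record")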
def entries_list_view(
d: dict,
sleep: float = 0.0,
clear: bool = False,
title: str = None,
title_color: str = "black",
):
df =
|
pd.DataFrame.from_dict(d, orient="index", columns=["entries"])
|
pandas.DataFrame.from_dict
|
import pandas as pd
import numpy as np
from backtester.constants import *
class InstrumentData(object):
'''
'''
def __init__(self, instrumentId, tradeSymbol, fileName=None, chunkSize=None):
self.__instrumentId = instrumentId
self.__tradeSymbol = tradeSymbol
self.__fileName = fileName
self.__bookDataSize = None
        if chunkSize is None:
            if fileName:
                self.__bookData = pd.read_csv(fileName, index_col=0, parse_dates=True, dtype=float)
                self.__bookData.dropna(inplace=True)
                self.__bookDataSize = len(self.__bookData)
                self.getBookDataChunk = self.__getBookDataInChunksFromDataFrame
        else:
            # when a chunkSize is given, read the file lazily as an iterator of chunks
            self.__bookData = pd.read_csv(fileName, index_col=0, parse_dates=True, dtype=float, chunksize=chunkSize)
            self.getBookDataChunk = self.__getBookDataInChunksFromFile
def getInstrumentId(self):
return self.__instrumentId
def getTradeSymbol(self):
return self.__tradeSymbol
def getBookDataSize(self):
if self.__bookDataSize is None:
self.__bookDataSize = len(pd.read_csv(self.__fileName, index_col=0, usecols=[0]))
return self.__bookDataSize
def setBookData(self, data):
self.__bookData = data
self.__bookDataSize = len(self.__bookData)
def getBookData(self):
return self.__bookData
def getBookDataByFeature(self, feature):
return self.__bookData[feature]
# returns a chunk from already completely loaded data
def __getBookDataInChunksFromDataFrame(self, chunkSize):
if chunkSize <=0 :
logError("chunkSize must be a positive integer")
for chunkNumber, bookDataChunk in self.__bookData.groupby(np.arange(self.__bookDataSize) // chunkSize):
yield (chunkNumber, bookDataChunk)
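    # Illustration (added, hedged): with __bookDataSize == 5 and chunkSize == 2,
    # np.arange(5) // 2 gives [0, 0, 1, 1, 2], so the groupby above yields three
    # chunks of 2, 2 and 1 rows, numbered 0, 1 and 2; fixed-size slices in order.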
# returns a chunk from __bookData generator after processing data
# TODO: implement proper padding such that all instruments have same index set (timeUpdates)
def __getBookDataInChunksFromFile(self, dateRange):
chunkNumber = -1
for bookDataChunk in self.__bookData:
chunkNumber += 1
bookDataChunk = self.filterDataByDates(bookDataChunk, dateRange)
yield (chunkNumber, bookDataChunk)
# returns all timestamps in pandas series format
def getAllTimestamps(self):
if isinstance(self.__bookData, pd.DataFrame):
return self.__bookData.index
else:
return pd.read_csv(self.__fileName, index_col=0, usecols=[0]).index
# returns list of bookDataFeatures (columns)
def getBookDataFeatures(self):
if isinstance(self.__bookData, pd.DataFrame):
return list(self.__bookData.columns)
else:
return list(pd.read_csv(self.__fileName, index_col=0, nrows=1).columns)
def getTypeOfInstrument(self):
return INSTRUMENT_TYPE_STOCK
def filterDataByDates(self, dateRange):
        if (dateRange == []) or (dateRange == ()):
return
elif type(dateRange) is list:
frames = []
for dr in dateRange:
frames.append(self.__bookData[dr[0]:dr[1]])
self.__bookData =
|
pd.concat(frames)
|
pandas.concat
|
# Project 2 - Predicting the Financial Return of Investments in Government Bonds
# Part 1 - Model Development Using a Framework
# Imports
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import warnings
warnings.filterwarnings("ignore")
# Load the dataset
df = pd.read_csv('dados/dataset.csv')
print("\n")
print('Dados Carregados com Sucesso!')
print('Shape:', df.shape)
print(df.head())
# Visualize the data
df.plot(x = 'Investimento', y = 'Retorno', style = 'o')
plt.title('Investimento x Retorno')
plt.xlabel('Investimento')
plt.ylabel('Retorno')
plt.savefig('imagens/parte1-grafico1.png')
plt.show()
# Prepare the data
X = df.iloc[:, :-1].values
y = df.iloc[:, 1].values
# Split into training and test data (70/30)
X_treino, X_teste, y_treino, y_teste = train_test_split(X, y, test_size = 0.3, random_state = 0)
# Adjust the shape and dtype of the training input data
X_treino = X_treino.reshape(-1, 1).astype(np.float32)
# Model construction
# Linear regression model
modelo = LinearRegression()
# Train the model
modelo.fit(X_treino, y_treino)
print("\n")
print('Modelo Treinado com Sucesso!')
# Print the B0 and B1 coefficients
print("\n")
print('B1 (coef_) :', modelo.coef_)
print('B0 (intercept_) :', modelo.intercept_)
# Plot the linear regression line
# y = B1 * X + B0
regression_line = modelo.coef_ * X + modelo.intercept_
plt.scatter(X, y)
plt.title('Investimento x Retorno')
plt.xlabel('Investimento')
plt.ylabel('Retorno Previsto')
plt.plot(X, regression_line, color = 'red')
plt.savefig('imagens/parte1-regressionLine.png')
plt.show()
# Predictions on the test data
y_pred = modelo.predict(X_teste)
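# Hedged sketch (added): the metrics imported above would typically be applied
# to these predictions as follows (variable names follow the script):
# mse = mean_squared_error(y_teste, y_pred)
# mae = mean_absolute_error(y_teste, y_pred)
# r2 = r2_score(y_teste, y_pred)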
# Actual vs. predicted
df_valores =
|
pd.DataFrame({'Valor Real': y_teste, 'Valor Previsto': y_pred})
|
pandas.DataFrame
|
"""
Contains functions and classes that are olfactory-specific.
@author: <NAME>
"""
# ################################# IMPORTS ###################################
import copy
import itertools
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.linalg as LA
import scipy.stats as sps
import pathlib
import functions.nmf as nmf
import functions.general as FG
import functions.plotting as FP
import functions.olf_circ_offline as FOC
import params.act2 as par_act2
import params.act3 as par_act3
import params.con as par_con
import os
from typing import List, Tuple, Any
from functions.plotting import plot_cov # since we are not using any other
# plotting function
PATH = os.path.realpath(f"{os.path.expanduser('~')}/ORN-LN_circuit") + '/'
OLF_PATH = pathlib.Path.home() / 'ORN-LN_circuit'
# ###################### FUNCTIONS CONNECTIVITY DATA ##########################
def get_labels(sheet, n_begin: int, n: int):
all_labels_row = np.array(FG.get_labels_row(sheet, n_begin, n, 1))
all_labels_column = np.array(FG.get_labels_clmn(sheet, n_begin, n, 1))
if np.sum((all_labels_row == all_labels_column)-1) != 0:
print('left headers rows and columns are not the same!')
all_labels = all_labels_row # both labels are the same
# to get names similar to the activity data
all_labels = FG.replace_substr_np(all_labels, par_con.dict_str_replace)
return all_labels
def combine_labels(labels_left, labels_right, lab):
"""
this function combines the left and right cell names.
It first replaces left and L and right by R if it is the same label on
both sides
Then if on the right there is the string right and on the left there is the
string left, it is replaced by the string lab
"""
labels_combined = np.zeros(len(labels_left), dtype='U40')
for i in range(len(labels_combined)):
if labels_left[i] == labels_right[i]:
labels_combined[i] = FG.repl_add(labels_left[i], ' left', ' L')
labels_combined[i] = FG.repl_add(labels_combined[i], ' right', ' R')
labels_combined[i] += lab
else:
if ('left' in labels_left[i]) and ('right' in labels_right[i]):
labels_combined[i] = FG.repl_add(labels_left[i], ' left', lab)
else:
labels_combined[i] = labels_left[i] + labels_right[i]
print('something weird in the function combine_labels')
print('labels are', labels_left[i], labels_right[i])
return labels_combined
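# Illustration (added, hedged; the cell name below is hypothetical): per the
# docstring above, combine_labels(['ORN 1a left'], ['ORN 1a right'], ' M') is
# expected to yield ['ORN 1a M'], i.e. a matching left/right pair collapses
# into a single entry tagged with `lab`.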
# We need to find a new way to combine the cells, being careful with the
# broad cells;
# that means we can no longer just sum the cells
def import_con_data(keys=['L', 'R', 'M']):
"""
returns the connectivity data
this function makes several transformations to the initial dataset
which is encoded in an excel sheet. Transformations that are made
are around the names of the variables, and also the creation of
the M dataset, which are the mean connections from L and R
keys should be a list or an array indicating which 'sides' we want to get
options are: 'L', 'R', 'S', 'M'
the option None return them all, no option returns L, R, M
"""
dict_str = {'bilateral': 'bil.',
# 'left': 'L',
# 'right': 'R',
'dendrites': 'dend',
'octopaminergic': 'oct.',
'Olfactory': 'olf.',
'LOWER': 'low.',
'UPPER': 'up.',
'Descending': 'Desc.'}
sheet_con_L = FG.get_sheet(OLF_PATH / par_con.file_L)
sheet_con_R = FG.get_sheet(OLF_PATH / par_con.file_R)
cons = {}
cons['L'] = FG.get_data(sheet_con_L, par_con.all_begin, par_con.all_n,
par_con.all_begin, par_con.all_n)
cons['R'] = FG.get_data(sheet_con_R, par_con.all_begin, par_con.all_n,
par_con.all_begin, par_con.all_n)
cells = {}
cells['L'] = get_labels(sheet_con_L, par_con.all_begin, par_con.all_n)
cells['R'] = get_labels(sheet_con_R, par_con.all_begin, par_con.all_n)
# changing the position of ORN and PN in the cell names
for i, s in itertools.product(range(len(cells['L'])), ['L', 'R']):
cells[s][i] = FG.repl_preadd(cells[s][i], ' ORN', 'ORN ')
cells[s][i] = FG.repl_preadd(cells[s][i], ' PN', 'PN ')
cells['S'] = combine_labels(cells['L'], cells['R'], ' S')
cells['M'] = combine_labels(cells['L'], cells['R'], ' M')
for cells1 in cells.values():
cells1 = FG.replace_substr_np(cells1, dict_str)
for i in range(len(cells['L'])):
cells['L'][i] = FG.repl_add(cells['L'][i], ' left', ' L')
cells['R'][i] = FG.repl_add(cells['R'][i], ' left', ' L')
cells['L'][i] = FG.repl_add(cells['L'][i], ' right', ' R')
cells['R'][i] = FG.repl_add(cells['R'][i], ' right', ' R')
cells_bil = ['Keystone', 'PN 35a bil.'] # bilateral cells
for cl in cells_bil:
if cells['L'][i] == f'{cl} L':
cells['L'][i] = f'{cl} L L'
if cells['L'][i] == f'{cl} R':
cells['L'][i] = f'{cl} R L'
if cells['R'][i] == f'{cl} R':
cells['R'][i] = f'{cl} R R'
if cells['R'][i] == f'{cl} L':
cells['R'][i] = f'{cl} L R'
cons['L'] =
|
pd.DataFrame(cons['L'], index=cells['L'], columns=cells['L'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D input with shape (2, 3); `empty` is a factory that builds
        # appropriately sized array-like objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
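# For every column position, exercise positional indexing, then verify that
# label lookup with np.nan raises when no NaN column exists, returns a Series
# when exactly one NaN column exists, and returns a DataFrame when several do.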
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should fall back to last-ditch (plain ndarray) construction
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related to GH 6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except ValueError:
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# it is actually tricky to create the record-like arrays and
# keep the dtypes intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# created recarray and with to_records recarray (have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
# list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples are in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
assert len(result) == 0
tm.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result =
|
DataFrame.from_records([])
|
pandas.DataFrame.from_records
|
import pandas as pd
import matplotlib.pyplot as plt
from . import get_data
class TeamStat:
def __init__(self,tid):
self.tid = tid
self.team_data = pd.DataFrame()
self.personal_data = pd.DataFrame()
def get_team_data(self,pages=-1):
self.team_data = get_data.get_team(self.tid,pages).astype({'timestamp':int})
def get_personal_data(self,source='https://www.dropbox.com/s/nr12wbnnx7m3w89/charcount.csv?dl=1'):
df =
|
pd.read_csv(source)
|
pandas.read_csv
|
__author__ = 'lucabasa'
__version__ = '1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
from utilities import read_data
import feature_eng as fe
import feature_selection as fs
import model_selection as ms
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
agg_loc = 'processed_data/'
agg_name = 'total_aggregation_with_FE_0219.csv'
save_loc = 'results/stack_n_blend/'
model_list = {'lightGBM': ms.lightgbm_train,
'XGB': ms.xgb_train,
'lightGMBrf': ms.lightgbm_rf,
'RF': ms.rf_train,
'extra': ms.extratrees_train}
sel_list = {'only_hist': fs.sel_hist,
'only_new': fs.sel_new,
'only_money': fs.sel_money,
'only_counts': fs.sel_counts,
'no_money': fs.sel_nomoney,
'full': fs.sel_all}
def stack():
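# Second-level (meta) stacking: fit random forest, extra trees, and lightGBM
# on the out-of-fold predictions written by single_model(), then save each
# meta-model's test predictions and feature importances to csv.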
train = pd.read_csv('results/stack_n_blend/oof_predictions.csv')
del train['Unnamed: 0']
test = pd.read_csv('results/stack_n_blend/all_predictions.csv')
target = train['target']
id_to_sub = test.card_id
kfolds = KFold(5, shuffle=True, random_state=42)
predictions, cv_score, feat_imp, oof = ms.rf_train(train, test, target, kfolds)
print(f'random forest:\t {cv_score}')
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df['target'] = predictions
sub_df.to_csv('stack_rf.csv', index=False)
feat_imp.to_csv('stack_rf_featimp.csv', index=False)
predictions, cv_score, feat_imp, oof = ms.extratrees_train(train, test, target, kfolds)
print(f'Extra trees:\t {cv_score}')
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df['target'] = predictions
sub_df.to_csv('stack_extratrees.csv', index=False)
feat_imp.to_csv('stack_extratrees_featimp.csv', index=False)
predictions, cv_score, feat_imp, oof = ms.lightgbm_train(train, test, target, kfolds)
print(f'lightGBM:\t {cv_score}')
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df['target'] = predictions
sub_df.to_csv(save_loc + 'stack_lightgbm.csv', index=False)
feat_imp.to_csv(save_loc + 'stack_lightgbm_featimp.csv', index=False)
def blend():
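# Blend the out-of-fold predictions: report the RMSE of a plain mean over all
# models, of the mean of the best-N models by CV score, and of a
# CV-score-weighted mean (see the weighting step below).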
train = pd.read_csv('results/stack_n_blend/oof_predictions.csv')
del train['Unnamed: 0']
test = pd.read_csv('results/stack_n_blend/all_predictions.csv')
target = train['target']
id_to_sub = test.card_id
kfolds = KFold(5, shuffle=True, random_state=42)
del train['target']
train['oof_score'] = train.mean(axis=1)
print('Full blend: ', mean_squared_error(train.oof_score, target)**0.5)
del train['oof_score']
scores = pd.read_csv('results/stack_n_blend/single_cvscores.csv')
scores = scores.rename(columns={'Unnamed: 0': 'models'})
for num in np.arange(1, 15):
best_blends = scores.sort_values(by='CV_score').head(num).models.values
train['oof_score'] = train[best_blends].mean(axis=1)
print(f'Best {num} blends: ', mean_squared_error(train.oof_score, target)**0.5)
del train['oof_score']
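# Weighted blend: scale each model's OOF column by its share of the summed CV
# scores, then sum row-wise to obtain the blended prediction. Note this weights
# by raw CV score, so models with larger scores receive larger weights.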
tot_score = scores.CV_score.sum()
for model in scores.models.unique():
train[model] = train[model] * (scores[scores.models == model].CV_score.values[0] / tot_score)
train['oof_score'] = train.sum(axis=1)
print('Weighted blend: ', mean_squared_error(train.oof_score, target)**0.5)
def single_model():
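# Train every model in model_list with every feature selection in sel_list,
# writing per-combination test predictions and feature importances, collecting
# out-of-fold predictions for stacking, and saving CV scores for blending.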
train = read_data('raw_data/train.csv')
test = read_data('raw_data/test.csv')
df_tr = pd.read_csv(agg_loc + agg_name)
train = pd.merge(train, df_tr, on='card_id', how='left').fillna(0)
test = pd.merge(test, df_tr, on='card_id', how='left').fillna(0)
del df_tr
train = fe.combine_categs(train)
test = fe.combine_categs(test)
kfolds = KFold(5, shuffle=True, random_state=42)
results = {}
for_second_level = pd.DataFrame({'target': train['target']})
for model in model_list.keys():
to_train = model_list.get(model)
for selection in sel_list:
to_select = sel_list.get(selection)
print(f'{model}_{selection}')
df_train = train.copy()
df_test = test.copy()
target = df_train['target']
id_to_sub = df_test['card_id']
del df_train['target']
del df_train['card_id']
del df_test['card_id']
df_train, df_test = to_select(df_train, df_test)
predictions, cv_score, feat_imp, oof = to_train(df_train, df_test, target, kfolds)
results[model + '_' + selection] = cv_score
for_second_level[model + '_' + selection] = oof
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df["target"] = predictions
sub_df.to_csv(save_loc + model + '_' + selection + '.csv', index=False)
feat_imp.to_csv(save_loc + model + '_' + selection + "_featimp.csv", index=False)
for_second_level.to_csv(save_loc + 'oof_predictions.csv')
print(f'{model}_{selection}:\t {cv_score}')
print('_'*40)
print('_'*40)
print('\n')
final = pd.DataFrame.from_dict(results, orient='index', columns=['CV_score'])
final.to_csv(save_loc + 'single_cvscores.csv')
for_second_level.to_csv(save_loc + 'oof_predictions.csv')
def stack_with_features():
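# Variant of stack(): augment the out-of-fold predictions with the original
# purchase/month features before training the second-level models.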
train = read_data('raw_data/train.csv')
test = read_data('raw_data/test.csv')
df_tr = pd.read_csv(agg_loc + agg_name)
train = pd.merge(train, df_tr, on='card_id', how='left').fillna(0)
test = pd.merge(test, df_tr, on='card_id', how='left').fillna(0)
del df_tr
train = fe.combine_categs(train)
test = fe.combine_categs(test)
train = train[['card_id', 'target'] + [col for col in train.columns if 'purchase' in col or 'month' in col]]
test = test[['card_id'] + [col for col in train.columns if 'purchase' in col or 'month' in col]]
print(train.columns)
stacked = pd.read_csv('results/stack_n_blend/oof_predictions.csv')
del stacked['Unnamed: 0']
del stacked['target']
st_test = pd.read_csv('results/stack_n_blend/all_predictions.csv')
#stacked = stacked[[col for col in stacked.columns if 'lightGBM_' in col]]
#st_test = st_test[[col for col in stacked.columns if 'lightGBM_' in col] + ['card_id']]
train = pd.concat([train, stacked], axis=1)
test =
|
pd.merge(test, st_test, on='card_id', how='left')
|
pandas.merge
|
from wikidataintegrator.wdi_helpers import try_write
from wikidataintegrator import wdi_core, wdi_login
from bs4 import BeautifulSoup # library to parse HTML documents
from datetime import datetime
from getpass import getpass
import pandas as pd # library for data analysis
import requests # library to handle requests
import os
# get the response in the form of html
wikiurl="https://en.wikipedia.org/wiki/Template:COVID-19_pandemic_death_rates"
table_class="wikitable sortable jquery-tablesorter"
response=requests.get(wikiurl)
soup = BeautifulSoup(response.text, 'html.parser')
casetable=soup.find('table',{'class':"wikitable"})
df=pd.read_html(str(casetable))
# pd.read_html returns a list of DataFrames, one per table; take the first
cases_df=pd.DataFrame(df[0])
local_outbreak_items =
|
pd.read_csv("reference.csv")
|
pandas.read_csv
|
import re
import requests
from bs4 import BeautifulSoup
import json
from collections import OrderedDict
from io import StringIO
import pandas as pd
from astropy.time import Time
from datetime import datetime,date,timedelta
from tns_api_search import search, get, format_to_json, get_file
from astropy.coordinates import SkyCoord
from astropy import units as u
url_tns_api="https://wis-tns.weizmann.ac.il/api/get"
from credentials import tns
api_key = tns.settings.API_KEY
def goodrow(class_):
return ((class_=="row-even public odd") or (class_=='row-odd public odd'))
def getTNS(reportdays=5,discoverdays=5,enddate=None,classified=1,disc_mag_min=16,disc_mag_max=21,z_min=0.015,z_max=0.08,
skip_ztf=False,num_page=100,verbose=False,otherparams={},**kwargs):
'''
returns a pandas DataFrame with the redshift, internal name, discovery date, and discovery magnitude
of objects from TNS which match the search criteria
parameters:
reportdays - maximum number of days that have passed since being reported
z_min - minimum redshift
z_max - maximum redshift
disc_mag_min - minimum discovery magnitude
disc_mag_max - maximum discovery magnitude
Note: I believe this is just a numerical cut, not physical, i.e. the minimum is the lowest numerical
value that will be returned
classified - 1: is classified, 0: classification not considered
unclassified - 1: is unclassified, 0: unclassification not considered
'''
# link = f'https://wis-tns.weizmann.ac.il/search?&discovered_period_value={reportdays}&discovered_period_units=days&unclassified_at={unclassified}&classified_sne={classified}&name=&name_like=0&isTNS_AT=all&public=all&coords_unit=arcsec&redshift_min={z_min}&redshift_max={z_max}&discovery_mag_min={disc_mag_min}&discovery_mag_max={disc_mag_max}&objtype=3&sort=desc&order=discoverydate&num_page=500'
link = 'https://wis-tns.weizmann.ac.il/search'
if enddate is None:
enddate = date.today()
startdate = enddate - timedelta(discoverdays)
params = {"discovered_period_value":reportdays,
"discovered_period_units":"days",
"date_start[date]":startdate.isoformat(),
"date_end[date]":enddate.isoformat(),
"classified_sne":int(classified),
"unclassified_at":int(not(classified)),
"discovery_mag_min":disc_mag_min,
"discovery_mag_max":disc_mag_max,
"num_page":num_page
}
params.update(otherparams)
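# Classified searches additionally restrict to SNe (objtype=3, matching the
# classified_sne flag above) within the requested redshift range, newest first;
# unclassified searches restrict to AT entries (at_type=1) ordered by internal name.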
if classified:
params.update({"objtype":3,
"redshift_min":z_min,
"redshift_max":z_max,
"sort":"desc",
"order":"discoverydate"})
else:
params.update({"at_type":1,
"sort":"asc",
"order":"internal_name"})
r = requests.get(link,params=params)
if verbose:
print(r.url)
soup = BeautifulSoup(r.text, "lxml")
return_arr = []
tr = soup.find_all('tbody')
if verbose:
print("Number of tables on the webpage:",len(tr))
if len(tr)>0:
tr = tr[0]
else:
raise RuntimeError("No result is found")
cols = ['internal_name','redshift','ra','decl','hostname','host_redshift','discoverydate','discoverymag','disc_filter_name','name','ot_name']
dflist = []
if verbose:
print("Number of rows in search result: ",len(tr.find_all(class_=goodrow,recursive=False)))
for row in tr.find_all(class_=goodrow,recursive=False):
df = {}
for col in cols:
value = row.find('td',class_='cell-{}'.format(col),recursive=False)
if value is None:
df[col] = None
else:
df[col] = value.text
df['name'] = df['name'].split()[1]
if (not classified) and skip_ztf and df['internal_name'].startswith('ZTF'):
break
dflist.append(df)
df = pd.DataFrame(dflist)
df.columns = ['internal_name','redshift','ra_s','dec_s','hostname','host_redshift','discoverydate','discoverymag','disc_filter_name','tns_name','type']
c = SkyCoord(ra=df.ra_s.values, dec=df.dec_s.values, unit=(u.hourangle,u.deg))
df['meanra'] = c.ra.degree
df['meandec'] = c.dec.degree
df['oid'] = df['tns_name']
return df.sort_values('discoverydate',ascending=False).reset_index(drop=True)
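# Minimal usage sketch (hypothetical values; assumes network access to the TNS
# server), using the column names assigned above:
#   recent = getTNS(reportdays=5, discoverdays=5, classified=1, verbose=True)
#   print(recent[['tns_name', 'internal_name', 'redshift', 'discoverymag']].head())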
def get_tns_name(internal_name):
search_obj = [("internal_name",internal_name)]
response = search(url_tns_api,search_obj)
if None not in response:
json_data = format_to_json(response.text)
reply = json.loads(json_data)['data']['reply']
if len(reply) == 0:
return
else:
return reply[0]['objname']
else:
print(response[1])
return
def get_tns_data(tns_name):
data = {}
get_obj=[("objname",tns_name), ("photometry","1"), ("spectra","1")]
response=get(url_tns_api,get_obj)
if None not in response:
# Here we just display the full json data as the response
json_data = format_to_json(response.text)
data['meta'] = format_meta(json.loads(json_data)['data']['reply'])
photometry = json.loads(json_data)['data']['reply']['photometry']
spectra = json.loads(json_data)['data']['reply']['spectra']
data['photometry'] = format_photometry(photometry)
data['spectra'] = format_spectra(spectra)
else:
print (response[1])
data = None
return data
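# Hypothetical example (assumes a valid TNS API key in credentials; the object
# name below is made up):
#   name = get_tns_name('ZTF18aaaaaaa')   # survey-internal name -> TNS objname
#   if name is not None:
#       data = get_tns_data(name)         # dict with 'meta', 'photometry', 'spectra'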
def format_meta(reply):
cols = ['internal_name','redshift','radeg','decdeg','hostname','host_redshift','discoverydate','discoverymag','discmagfilter','name']
df = {k: reply[k] for k in cols}
return pd.DataFrame([pd.DataFrame(df).loc['name']])
def format_photometry(photometry):
dflist = []
for epoch in photometry:
dflist.append(epoch)
df = pd.DataFrame(dflist)
cols = ['flux_unit','instrument','telescope','filters']
for col in cols:
df[col] = df[col].apply(pd.Series)['name']
df['mjd'] = Time(list(df['obsdate'].values),format='iso').mjd
return df
def format_spectra(spectra):
if len(spectra) == 0:
return
else:
dflist = []
for epoch in spectra:
dflist.append(epoch)
df =
|
pd.DataFrame(dflist)
|
pandas.DataFrame
|
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["<NAME>", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# unlimited splits: re.split uses maxsplit=0, str.split uses -1; both should match the default
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
tm.assert_series_equal(result, expected)
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")])
tm.assert_series_equal(result, expected)
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False).tolist()
assert result == [v.partition("_") for v in s]
result = s.str.rpartition("_", expand=False).tolist()
assert result == [v.rpartition("_") for v in s]
def test_partition_index():
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = values.str.partition("_", expand=False)
exp = Index(
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition("_", expand=False)
exp = Index(
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition("_")
exp = Index(
[
("a", "_", "b_c"),
("c", "_", "d_e"),
("f", "_", "g_h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition("_")
exp = Index(
[
("a_b", "_", "c"),
("c_d", "_", "e"),
("f_g", "_", "h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_")
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_")
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=True)
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_", expand=True)
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name(any_string_dtype):
# GH 12617
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",")
expected = DataFrame(
{0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# should preserve name
result = s.str.partition(",", expand=False)
expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")
tm.assert_series_equal(result, expected)
def test_partition_index_with_name():
idx = Index(["a,b", "c,d"], name="xxx")
result = idx.str.partition(",")
expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])
assert result.nlevels == 3
tm.assert_index_equal(result, expected)
# should preserve name
result = idx.str.partition(",", expand=False)
expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")
assert result.nlevels == 1
tm.assert_index_equal(result, expected)
def test_partition_sep_kwarg(any_string_dtype):
# GH 22676; depr kwarg "pat" in favor of "sep"
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
expected = s.str.partition(sep="_")
result = s.str.partition("_")
tm.assert_frame_equal(result, expected)
expected = s.str.rpartition(sep="_")
result = s.str.rpartition("_")
tm.assert_frame_equal(result, expected)
def test_get():
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
result = ser.str.split("_").str.get(1)
expected = Series(["b", "d", np.nan, "g"])
tm.assert_series_equal(result, expected)
def test_get_mixed_object():
ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])
result = ser.str.split("_").str.get(1)
expected = Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_get_bounds():
ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])
# positive index
result = ser.str.split("_").str.get(2)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = ser.str.split("_").str.get(-3)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
def test_get_complex():
# GH 20671, getting value not in dict raising `KeyError`
ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}])
result = ser.str.get(1)
expected = Series([2, 2, np.nan, "a"])
tm.assert_series_equal(result, expected)
result = ser.str.get(-1)
expected = Series([3, 3, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("to_type", [tuple, list, np.array])
def test_get_complex_nested(to_type):
ser = Series([to_type([to_type([1, 2])])])
result = ser.str.get(0)
expected = Series([to_type([1, 2])])
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
from __future__ import division
import numpy as np
import os.path
import sys
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .therps_functions import TherpsFunctions
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
print("therps_model_rest.py@timefn: " + fn.func_name + " took " + "{:.6f}".format(t2 - t1) + " seconds")
return result
return measure_time
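# Hypothetical usage sketch (not part of the original module): decorating any
# function with timefn prints its wall-clock runtime after each call, e.g.
#
#     @timefn
#     def run_simulation(inputs):
#         ...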
class TherpsInputs(ModelSharedInputs):
"""
Input class for Therps.
"""
def __init__(self):
"""Class representing the inputs for Therps"""
super(TherpsInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
"""
Therps constructor.
:param chem_name:
:param use:
:param formu_name:
:param percent_act_ing:
:param foliar_diss_hlife:
:param num_apps:
:param app_interval:
:param application_rate:
:param ld50_bird:
:param lc50_bird:
:param noaec_bird:
:param noael_bird:
:param species_of_the_tested_bird_avian_ld50:
:param species_of_the_tested_bird_avian_lc50:
:param species_of_the_tested_bird_avian_noaec:
:param species_of_the_tested_bird_avian_noael:
:param tw_bird_ld50:
:param tw_bird_lc50:
:param tw_bird_noaec:
:param tw_bird_noael:
:param mineau_sca_fact:
:param aw_herp_sm:
:param aw_herp_md:
:param aw_herp_slg:
:param awc_herp_sm:
:param awc_herp_md:
:param awc_herp_lg:
:param bw_frog_prey_mamm:
:param bw_frog_prey_herp:
:return:
"""
self.use = pd.Series([], dtype="object", name="use")
self.formu_name = pd.Series([], dtype="object", name="formu_name")
self.percent_act_ing = pd.Series([], dtype="float", name="percent_act_ing")
self.foliar_diss_hlife = pd.Series([], dtype="float64", name="foliar_diss_hlife")
self.num_apps = pd.Series([], dtype="int64", name="num_apps")
self.app_interval = pd.Series([], dtype="int", name="app_interval")
self.application_rate = pd.Series([], dtype="float", name="application_rate")
self.ld50_bird = pd.Series([], dtype="float", name="ld50_bird")
self.lc50_bird = pd.Series([], dtype="float", name="lc50_bird")
self.noaec_bird = pd.Series([], dtype="float", name="noaec_bird")
self.noael_bird = pd.Series([], dtype="float", name="noael_bird")
self.species_of_the_tested_bird_avian_ld50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_ld50")
self.species_of_the_tested_bird_avian_lc50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_lc50")
self.species_of_the_tested_bird_avian_noaec = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noaec")
self.species_of_the_tested_bird_avian_noael = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noael")
self.tw_bird_ld50 = pd.Series([], dtype="float", name="tw_bird_ld50")
self.tw_bird_lc50 = pd.Series([], dtype="float", name="tw_bird_lc50")
self.tw_bird_noaec = pd.Series([], dtype="float", name="tw_bird_noaec")
self.tw_bird_noael = pd.Series([], dtype="float", name="tw_bird_noael")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.aw_herp_sm = pd.Series([], dtype="float", name="aw_herp_sm")
self.aw_herp_md = pd.Series([], dtype="float", name="aw_herp_md")
self.aw_herp_lg = pd.Series([], dtype="float", name="aw_herp_lg")
self.awc_herp_sm = pd.Series([], dtype="float", name="awc_herp_sm")
self.awc_herp_md = pd.Series([], dtype="float", name="awc_herp_md")
self.awc_herp_lg = pd.Series([], dtype="float", name="awc_herp_lg")
self.bw_frog_prey_mamm = pd.Series([], dtype="float", name="bw_frog_prey_mamm")
self.bw_frog_prey_herp = pd.Series([], dtype="float", name="bw_frog_prey_herp")
## application rates and days of applications
#self.app_rates = pd.Series([], dtype="object") #Series of lists, each list contains app_rates of a model simulation run
#self.day_out = pd.Series([], dtype="object") #Series of lists, each list contains day #'s of applications within a model simulation run
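# Hypothetical illustration (not part of the original module) of how these empty
# Series might be filled from a one-row pandas DataFrame of user inputs; in the
# real model the population is assumed to be handled by the UberModel machinery:
#
#     import pandas as pd
#     user_inputs = pd.DataFrame({"percent_act_ing": [85.0], "num_apps": [3]})
#     therps_in = TherpsInputs()
#     therps_in.percent_act_ing = user_inputs["percent_act_ing"].astype("float")
#     therps_in.num_apps = user_inputs["num_apps"].astype("int64")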
class TherpsOutputs(object):
"""
Output class for Therps.
"""
def __init__(self):
"""Class representing the outputs for Therps"""
super(TherpsOutputs, self).__init__()
## application rates and days of applications
#self.day_out = pd.Series([], dtype='object', name='day_out')
#self.app_rates = pd.Series([], dtype='object', name='app_rates')
# TODO: Add these back in after deciding how to handle the numpy arrays
# timeseries of concentrations related to herbaceous food sources
# self.out_c_ts_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_fp = pd.Series([], dtype='float') # fruits/pods
#
# self.out_c_ts_mean_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_mean_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_mean_fp = pd.Series([], dtype='float') # fruits/pods
# Table 5
self.out_ld50_ad_sm = pd.Series([], dtype='float', name="out_ld50_ad_sm")
self.out_ld50_ad_md = pd.Series([], dtype='float', name="out_ld50_ad_md")
self.out_ld50_ad_lg = pd.Series([], dtype='float', name="out_ld50_ad_lg")
self.out_eec_dose_bp_sm = pd.Series([], dtype='float', name="out_eec_dose_bp_sm")
self.out_eec_dose_bp_md = pd.Series([], dtype='float', name="out_eec_dose_bp_md")
self.out_eec_dose_bp_lg = pd.Series([], dtype='float', name="out_eec_dose_bp_lg")
self.out_arq_dose_bp_sm = pd.Series([], dtype='float', name="out_arq_dose_bp_sm")
self.out_arq_dose_bp_md = pd.Series([], dtype='float', name="out_arq_dose_bp_md")
self.out_arq_dose_bp_lg = pd.Series([], dtype='float', name="out_arq_dose_bp_lg")
self.out_eec_dose_fr_sm = pd.Series([], dtype='float', name="out_eec_dose_fr_sm")
self.out_eec_dose_fr_md = pd.Series([], dtype='float', name="out_eec_dose_fr_md")
self.out_eec_dose_fr_lg = pd.Series([], dtype='float', name="out_eec_dose_fr_lg")
self.out_arq_dose_fr_sm = pd.Series([], dtype='float', name="out_arq_dose_fr_sm")
self.out_arq_dose_fr_md = pd.Series([], dtype='float', name="out_arq_dose_fr_md")
self.out_arq_dose_fr_lg = pd.Series([], dtype='float', name="out_arq_dose_fr_lg")
self.out_eec_dose_hm_md = pd.Series([], dtype='float', name="out_eec_dose_hm_md")
self.out_eec_dose_hm_lg = pd.Series([], dtype='float', name="out_eec_dose_hm_lg")
self.out_arq_dose_hm_md = pd.Series([], dtype='float', name="out_arq_dose_hm_md")
self.out_arq_dose_hm_lg = pd.Series([], dtype='float', name="out_arq_dose_hm_lg")
self.out_eec_dose_im_md = pd.Series([], dtype='float', name="out_eec_dose_im_md")
self.out_eec_dose_im_lg = pd.Series([], dtype='float', name="out_eec_dose_im_lg")
self.out_arq_dose_im_md = pd.Series([], dtype='float', name="out_arq_dose_im_md")
self.out_arq_dose_im_lg = pd.Series([], dtype='float', name="out_arq_dose_im_lg")
self.out_eec_dose_tp_md = pd.Series([], dtype='float', name="out_eec_dose_tp_md")
self.out_eec_dose_tp_lg = pd.Series([], dtype='float', name="out_eec_dose_tp_lg")
self.out_arq_dose_tp_md = pd.Series([], dtype='float', name="out_arq_dose_tp_md")
self.out_arq_dose_tp_lg = pd.Series([], dtype='float', name="out_arq_dose_tp_lg")
# Table 6
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_arq_herp_bl = pd.Series([], dtype='float', name="out_eec_arq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_arq_herp_fr = pd.Series([], dtype='float', name="out_eec_arq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_arq_herp_hm = pd.Series([], dtype='float', name="out_eec_arq_herp_hm")
self.out_eec_diet_herp_im = pd.Series([], dtype='float', name="out_eec_diet_herp_im")
self.out_eec_arq_herp_im = pd.Series([], dtype='float', name="out_eec_arq_herp_im")
self.out_eec_diet_herp_tp = pd.Series([], dtype='float', name="out_eec_diet_herp_tp")
self.out_eec_arq_herp_tp = pd.Series([], dtype='float', name="out_eec_arq_herp_tp")
# Table 7
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_crq_herp_bl = pd.Series([], dtype='float', name="out_eec_crq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_crq_herp_fr = pd.Series([], dtype='float', name="out_eec_crq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_crq_herp_hm = pd.Series([], dtype='float', name="out_eec_crq_herp_hm")
self.out_eec_diet_herp_im = pd.Series([], dtype='float', name="out_eec_diet_herp_im")
self.out_eec_crq_herp_im = pd.Series([], dtype='float', name="out_eec_crq_herp_im")
self.out_eec_diet_herp_tp = pd.Series([], dtype='float', name="out_eec_diet_herp_tp")
self.out_eec_crq_herp_tp = pd.Series([], dtype='float', name="out_eec_crq_herp_tp")
# Table 8
self.out_eec_dose_bp_sm_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_sm_mean")
self.out_eec_dose_bp_md_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_md_mean")
self.out_eec_dose_bp_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_lg_mean")
self.out_arq_dose_bp_sm_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_sm_mean")
self.out_arq_dose_bp_md_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_md_mean")
self.out_arq_dose_bp_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_lg_mean")
self.out_eec_dose_fr_sm_mean =
|
pd.Series([], dtype='float', name="out_eec_dose_fr_sm_mean")
|
pandas.Series
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Quota for Affinity."""
import logging
import pandas as pd
from sklearn import ensemble
class AffinityModelBase(object):
"""Base class for affinity regression."""
def __init__(self, affinity_file, affinity_value):
self.affinity_report = pd.read_csv(affinity_file)
self.standard = affinity_value
self.ml_model = ensemble.RandomForestClassifier(n_estimators=20)
def build_model(self):
"""Build regression model."""
desc = self.affinity_report['desc']
inputs = self.generate_input_space(desc)
labels = self.generate_label()
self.ml_model.fit(inputs, labels)
def predict(self, input):
"""Predict output from input."""
return self.ml_model.predict(input[:1])[0]
def generate_input_space(self, desc):
"""Generate input space from desc."""
if desc is None:
return None
space_list = []
for idx in range(len(desc)):
desc_item = eval(desc.iloc[idx])
space_dict = {}
self.init_space_dict(space_dict)
for key, value in desc_item.items():
self.get_space_dict(key, value, space_dict)
# space_dict[_metric_key] = eval(pfms.iloc[idx])[_metric_key]
if space_dict:
space_list.append(space_dict)
return pd.DataFrame(space_list)
def generate_label(self):
"""Generate label from affinity report."""
_pfms = self.affinity_report['performance']
_metric_key = eval(self.affinity_report['_objective_keys'][0])[0]
label_list = []
for pfm in _pfms:
value = eval(pfm)[_metric_key]
clc = 1 if value > self.standard else 0
label_list.append({_metric_key: clc})
return
|
pd.DataFrame(label_list)
|
pandas.DataFrame
|
from boadata import wrap
import numpy as np
import pandas as pd
class TestStatisticsMixin:
@property
def array(self):
x = [1, 2, 3, 4, 6]
native_array = np.array(x)
return wrap(native_array)
def test_mean(self):
assert self.array.mean() == 3.2
def test_median(self):
assert self.array.median() == 3
def test_mode(self):
array1 = wrap(np.array([1, 1, 2, 3]))
assert array1.mode() == 1
array2 = wrap(np.array([1, 1, 2, 2, 3, 4, 5]))
assert array2.mode() == 1
array3 = wrap(pd.Series([1, 2, 3, 4, 4, 0, 0]))
assert array3.mode() == 0
array4 = wrap(
|
pd.Series([1, 2, 3, 4, 4, 0, 0, -2, -2])
|
pandas.Series
|
import functionfile as ff
import v_error_eval as v_eval
import time_measurement as tm
import image_processing_function as ipf
import parallel_print_image as ppi
import pandas as pd
import os
import numpy as np
import sys
import matplotlib
matplotlib.use('agg')
tmp = sys.argv
program_num = "12(for_MIRU_not_Trans)"
c_mode = 0 # 0:MU NMF 1:HALS NMF 2:FGD 3:GCD
NMFQP = False
Time_Measurement = True
output_TM_matrix = True
Error_Cal = True
# prepare parameter in python -----------------------------------------------------------------------------------------
if c_mode == 0:
c_bar_max = 70
iteration_list = [5000]
c_method = "MU"
elif c_mode == 1:
c_bar_max = 70
iteration_list = [1000]
c_method = "HALS"
elif c_mode == 2:
c_bar_max = 70
if Error_Cal:
iteration_list = [1000]
else:
iteration_list = []
for i in range(50, 1001, 50):
iteration_list.append(i)
c_method = "FGD"
elif c_mode == 3:
c_bar_max = 70
if Error_Cal:
iteration_list = [1000]
else:
iteration_list = []
for i in range(50, 1001, 50):
iteration_list.append(i)
c_method = "GCD"
# parameter from bash -------------------------------------------------------------------------------------------------
if tmp[5] == "test":
program_num = "test"
iteration_list = [5]
test_flag = True
else:
test_flag = False
if tmp[4] == "cbcl":
use_data = "CBCL/train"
image_num_list = [0, 500, 1000, 1500]
elif tmp[4] == "yale":
use_data = "YaleFD/faces"
image_num_list = [0, 20, 40, 60]
r = int(tmp[1])
approximate_size = int(tmp[2])
wh_seed = int(tmp[3]) # base is 1
progress = (int(tmp[6]) - 1) * len(iteration_list) + 1
all_program_num = int(tmp[7]) * len(iteration_list)
r_path = "/home/ionishi/mnt/workspace/sketchingNMF/face_data/" + use_data
w_path = "/home/ionishi/mnt/workspace/sketchingNMF/{}/{}/{}/r,k/r={}/k={}" \
.format(use_data, program_num, c_method, r, approximate_size)
V, im_var, im_hol = ipf.read_pgm(r_path)
# when V size is n > m , transpose V
t_flag = False
if V.shape[0] > V.shape[1]:
V = V.T
t_flag = True
n, m = V.shape
if (approximate_size == m) | test_flag:
SNMF_only = False
else:
SNMF_only = True
for ite_i, iteration in enumerate(iteration_list):
program_code = "realdata_" + ff.program_name(0, c_mode, n, m, r, approximate_size, iteration, 0, wh_seed, program_num)
os.makedirs(w_path + "/time", exist_ok=True)
print("start " + program_code + " >>>>>\n")
# time measurement ------------------------------------------------------------------------------------------------
if Time_Measurement:
if output_TM_matrix:
t_result, W, H, W_s, H_s, small_matrix = \
tm.parallel_time_measurement(r, approximate_size, V, iteration, wh_seed, c_mode, snmf_only=SNMF_only, output_matrices=output_TM_matrix, t_flag=t_flag)
program_code = "realdata_" + ff.program_name(2, c_mode, n, m, r, approximate_size, iteration, 0, wh_seed, program_num)
os.makedirs(w_path + "/matrix", exist_ok=True)
if not SNMF_only:
np.savetxt(w_path + "/matrix/w_" + program_code + ".csv", W, delimiter=",")
np.savetxt(w_path + "/matrix/h_" + program_code + ".csv", H, delimiter=",")
np.savetxt(w_path + "/matrix/W_s_" + program_code + ".csv", W_s, delimiter=",")
np.savetxt(w_path + "/matrix/H_s_" + program_code + ".csv", H_s, delimiter=",")
if V.shape[0] <= V.shape[1]:
np.savetxt(w_path + "/matrix/H_small_" + program_code + ".csv", small_matrix, delimiter=",")
else:
np.savetxt(w_path + "/matrix/W_small_" + program_code + ".csv", small_matrix, delimiter=",")
program_code = "realdata_" + ff.program_name(0, c_mode, n, m, r, approximate_size, iteration, 0, wh_seed, program_num)
else:
t_result = tm.parallel_time_measurement(r, approximate_size, V, iteration, wh_seed, c_mode, snmf_only=SNMF_only)
print("\n{} / {} finish time measurement {} >>>>>\n".format(progress + ite_i, all_program_num, program_code))
# save time result
if SNMF_only:
tf_result = pd.DataFrame({"SNMF time": t_result[1]}, index=[0])
else:
tf_result = pd.DataFrame({"NMF time": t_result[0], "SNMF time": t_result[1]}, index=[0])
tf_result.to_csv(w_path + "/time/" + program_code + ".csv")
# calculate --------------------------------------------------------------------------------------------------------
if Error_Cal:
os.makedirs(w_path + "/graph", exist_ok=True)
os.makedirs(w_path + "/error", exist_ok=True)
os.makedirs(w_path + "/matrix", exist_ok=True)
program_code = "realdata_" + ff.program_name(2, c_mode, n, m, r, approximate_size, iteration, 0, wh_seed, program_num)
nmf_error, snmf_error, W, H, W_s, H_s, small_matrix = \
v_eval.parallel_v_error_eval(r, approximate_size, V, iteration, wh_seed, c_mode, NMFQP, t_flag, snmf_only=SNMF_only)
print("\n {} / {} finish calculate ! {}".format(progress + ite_i, all_program_num, program_code))
# save matrix
if not SNMF_only:
np.savetxt(w_path + "/matrix/w_" + program_code + ".csv", W, delimiter=",")
np.savetxt(w_path + "/matrix/h_" + program_code + ".csv", H, delimiter=",")
np.savetxt(w_path + "/matrix/W_s_" + program_code + ".csv", W_s, delimiter=",")
np.savetxt(w_path + "/matrix/H_s_" + program_code + ".csv", H_s, delimiter=",")
if V.shape[0] <= V.shape[1]:
np.savetxt(w_path + "/matrix/H_small_" + program_code + ".csv", small_matrix, delimiter=",")
else:
np.savetxt(w_path + "/matrix/W_small_" + program_code + ".csv", small_matrix, delimiter=",")
# save error list
if not SNMF_only:
e_result = pd.DataFrame([nmf_error, snmf_error], index=["NMF error", "SNMF error"])
else:
e_result =
|
pd.DataFrame([snmf_error], index=["SNMF error"])
|
pandas.DataFrame
|
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import glob
import os
import pandas as pd
from ts_datasets.anomaly.base import TSADBaseDataset
class Synthetic(TSADBaseDataset):
"""
Wrapper to load a synthetically generated dataset.
The dataset was generated using three base time series, each of which
was separately injected with shocks, spikes, dips and level shifts, making
a total of 15 time series (including the base time series without anomalies).
Subsets are defined by the base time series used ("horizontal",
"seasonal", "upward_downward"), or the type of injected anomaly ("shock",
"spike", "dip", "level"). The "anomaly" subset refers to all time series with
injected anomalies (12) while "base" refers to all time series without them (3).
"""
base_ts_subsets = ["horizontal", "seasonal", "upward_downward"]
anomaly_subsets = ["shock", "spike", "dip", "level", "trend"]
valid_subsets = ["anomaly", "all", "base"] + base_ts_subsets + anomaly_subsets
def __init__(self, subset="anomaly", rootdir=None):
super().__init__()
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
self.subset = subset
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "synthetic_anomaly")
csvs = sorted(glob.glob(f"{rootdir}/*.csv"))
if subset == "base":
csvs = [csv for csv in csvs if "anom" not in os.path.basename(csv)]
elif subset != "all":
csvs = [csv for csv in csvs if "anom" in os.path.basename(csv)]
if subset in self.base_ts_subsets + self.anomaly_subsets:
csvs = [csv for csv in csvs if subset in os.path.basename(csv)]
for csv in csvs:
df =
|
pd.read_csv(csv)
|
pandas.read_csv
|
#!/usr/bin/env python
import html
import pickle as pkl
from collections import OrderedDict
from pathlib import Path
import pandas as pd
from flask import url_for
from .utils import fopen, get_image_db
|
pd.set_option('display.max_colwidth', None)
|
pandas.set_option
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import itertools as itt
import logging
import unittest
import numpy as np
import pandas as pd
import pytest
from wltp import datamodel
from wltp.downscale import (
calc_downscale_factor,
decide_wltc_class,
downscale_by_recursing,
downscale_by_scaling,
downscale_class_velocity,
)
from wltp.invariants import round1
log = logging.getLogger(__name__)
def test_smoke1():
logging.getLogger().setLevel(logging.DEBUG)
test_mass = 1577.3106
p_rated = 78.6340
v_max = 186.4861
f0 = 152.5813
f1 = 307.5789
f2 = 0.0486
f_inertial = 1.03 # TODO: get it from schema-default
## Decide WLTC-class.
#
wltc = datamodel.get_wltc_data()
wltc_class = decide_wltc_class(wltc, p_rated / test_mass, v_max)
class_data = wltc["classes"][wltc_class]
V = pd.Series(class_data["v_cycle"])
f_downscale_threshold = 0.01 # TODO: get it from schema-default
f_downscale_decimals = 3 # TODO: get it from schema-default
dsc_data = class_data["downscale"]
phases = dsc_data["phases"]
p_max_values = dsc_data["p_max_values"]
downsc_coeffs = dsc_data["factor_coeffs"]
f_downscale, _orig_f = calc_downscale_factor(
p_max_values,
downsc_coeffs,
p_rated,
f_downscale_threshold,
f_downscale_decimals,
test_mass,
f0,
f1,
f2,
f_inertial,
)
if f_downscale > 0:
V = downscale_class_velocity(V, f_downscale, phases)
# print(
# "Class(%s), f_dnscl(%s), DIFFs:\n%s" % (wclass, f_downscale, diffs[bad_ix])
# )
# plt.plot(V, "r")
# plt.plot(V1, "b")
# plt.plot(V2, "g")
# plt.show()
# raise AssertionError(
# "Class(%s), f_dnscl(%s)" % (wclass, f_downscale)
# )
def test_smoke2():
wclasses = datamodel.get_wltc_data()["classes"]
test_data = [
(pd.Series(wclass["v_cycle"]), wclass["downscale"]["phases"], f_downscale)
for wclass in wclasses.values()
for f_downscale in np.linspace(0.1, 1, 10)
]
for (V, phases, f_downscale) in test_data:
downscale_class_velocity(V, f_downscale, phases)
_wltc = datamodel.get_wltc_data()
@pytest.mark.parametrize("wclass", _wltc["classes"])
def test_recurse_vs_scaling(wclass):
"""Compare downcalings with the both methods: simplified (scale by multiply) and by_the_spec (iterativelly scale accelerations)."""
from matplotlib import pyplot as plt
# Scaling == Recurse only with this!!
def double_round(n, decimals):
return round1(round1(n, decimals + 2), decimals)
pd_opts = [
"display.max_rows",
None,
"display.max_columns",
None,
"display.precision",
16,
"display.float_format",
"{:0.16f}".format,
"display.width",
160,
]
v_decimals = 1
class_data = _wltc["classes"][wclass]
V = pd.Series(class_data["v_cycle"])
phases = class_data["downscale"]["phases"]
bad_accuracies, bad_rounds = {}, {}
for f_downscale in np.arange(0, 4, 0.1):
V1 = downscale_by_recursing(V, f_downscale, phases)
V2 = downscale_by_scaling(V, f_downscale, phases)
bad_ix = ~np.isclose(V1, V2)
if bad_ix.any():
errs = pd.concat(
(V1, V2, V1 - V2), axis=1, keys=["recurse", "rescale", "diff"]
)[bad_ix]
bad_accuracies[f_downscale] = errs
bad_ix = (
double_round(V1, v_decimals).to_numpy()
!= double_round(V2, v_decimals).to_numpy()
)
if bad_ix.any():
bad_rounds[f_downscale] = pd.concat(
(V1, V2, (V1 - V2).abs()), axis=1, keys=["recurse", "rescale", "diff"]
)[bad_ix]
if bad_accuracies:
errs = pd.concat((bad_accuracies.values()), axis=0, keys=bad_accuracies.keys())
with pd.option_context(*pd_opts):
pytest.fail(f"{wclass}: ACCURACY errors!\n{errs}\n{errs.describe()}")
if bad_rounds:
rounded = (double_round(i, v_decimals) for i in bad_rounds.values())
rounded = pd.concat(rounded, axis=0, keys=bad_rounds.keys())
precise = pd.concat((bad_rounds.values()), axis=0, keys=bad_rounds.keys())
errs = pd.concat((rounded, precise), axis=1, keys=["rounded", "precise"])
with
|
pd.option_context(*pd_opts)
|
pandas.option_context
|
import os
import pickle
import re
import string
import pandas as pd
from keras.preprocessing import text, sequence
from scipy import sparse
from sklearn.feature_extraction.text import TfidfVectorizer
from utils.constants import CLASS_NAMES, MAX_FEATURES, MAX_LEN
from utils.preprocessing import clean_text, get_embeddings
def load_data(train_df_path, test_df_path, embedings_file):
train =
|
pd.read_csv(train_df_path)
|
pandas.read_csv
|
import json
from boto3 import client as boto3_client
import pandas as pd
lambda_client = boto3_client('lambda')
# import spacy
# nlp = spacy.load('/opt/en_core_web_sm-2.1.0')
# from metrics import *
def lambda_handler(event, context):
# test_run = 1
print(event)
if 'body' in event:
in_txt = json.loads(event['body'])['in_txt']
author = json.loads(event['body'])['author_name']
stage = json.loads(event['body'])['stage']
else:
in_txt = event['in_txt']
author = event['author_name']
stage = event['stage']
if stage == 'test':
f_ex_out = open ('example_output_new.json', "r")
example_output = json.loads(f_ex_out.read())
return {
'statusCode': 200,
'body': json.dumps(example_output),
'headers': {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Headers': 'Content-Type,Authorization',
'Access-Control-Allow-Methods': 'OPTIONS,GET, POST, PUT, DELETE',
'Access-Control-Allow-Credentials': 'true',
'Content-Type': 'application/json'
}
}
print(in_txt)
print(author)
if author.upper() == 'DICKENS':
f_author = open ('top5_Dickens.json', "r")
author_metrics = ["avgWordFrequencyClass","countFunctionalWords","daleChallReadability","simpsonsIndex","specicalCharacterCount"]
elif author.upper() == 'FITZGERALD':
f_author = open ('top5_Fitzgerald.json', "r")
author_metrics = ["avgWordFrequencyClass","countFunctionalWords","shannonEntropy","specicalCharacterCount","typeTokenRatio"]
elif author.upper() == 'TWAIN':
f_author = open ('top5_Twain.json', "r")
author_metrics = ["avgSyllablesPerWord","avgWordFrequencyClass","avgWordLength","countFunctionalWords","specicalCharacterCount"]
else:
f_author = open ('top5_Austen.json', "r")
author_metrics = ["avgWordFrequencyClass","brunetsMeasureW","hapaxLegemena","typeTokenRatio","yulesK"]
top5_author = json.loads(f_author.read())
body = {"in_txt" : in_txt ,
"chunk" : 'text'
}
clean1_msg = {"body" : body , 'invocation_type' : 'Sync'}
clean1_res = lambda_client.invoke(FunctionName="userTextClean1",
InvocationType='RequestResponse',
Payload=json.dumps(clean1_msg))
print(clean1_res)
clean1_res_bdy = json.load(clean1_res['Payload'])['body']['Text']
print(clean1_res_bdy)
stdPL_msg = {"data" : clean1_res_bdy, 'invocation_type' : 'Sync'}
print(stdPL_msg)
stdPL_res = lambda_client.invoke(FunctionName="stylometry_standardPL",
InvocationType='RequestResponse',
Payload=json.dumps(stdPL_msg))
print('standard PL')
stdPL_res_bdy = json.load(stdPL_res['Payload'])['body']
print(stdPL_res_bdy)
stdPL_res_json = json.loads(stdPL_res_bdy)
print(stdPL_res_json)
stdPL_res_DF =
|
pd.DataFrame(stdPL_res_json)
|
pandas.DataFrame
|