import pandas as pd
import numpy as np
import holidays
import statsmodels.formula.api as sm
import time
from Helper import helper
import datetime
class DR(object):
def __init__(self, dataframe):
df = dataframe.copy()
self.lm_data = helper.DR_Temp_data_cleaning(df)
self.name = 'DR'
def set_date(self, date):
self.date = date
def model_building(self, training_data, station):
ml = sm.ols(formula=station + "_Temp_Log~Load_Lag_48+Humi_Lag_48+I(Load_Lag_48**2)+I(Humi_Lag_48**2)+\
Hour+Weekday+Month+Holiday+ RIV_Temp_Log_Lag_48+I(RIV_Temp_Log_Lag_48**2)+\
Month:Load_Lag_48+Month:Humi_Lag_48+\
Hour:Load_Lag_48+Hour:Humi_Lag_48+\
Holiday:Load_Lag_48+Holiday:Humi_Lag_48", data=training_data).fit()
return ml
def model_selection_mape_rmse(self, station):
training_days = 30
date_time = pd.to_datetime(self.date) + datetime.timedelta(hours=7)
test_start_date = date_time - datetime.timedelta(days=training_days + 1)
train_end_date = test_start_date - datetime.timedelta(hours=8)
test_end_date = date_time - datetime.timedelta(hours=8)
forecast = []
x_test = []
this_date = test_start_date
for counter in range(training_days):
train_end_date = this_date
# print(train_end_date)
Y_start, Y_end = this_date + datetime.timedelta(hours=1), this_date + datetime.timedelta(hours=40)
start = time.time()
x_train = self.lm_data['2014-01-03 01:00':str(train_end_date)]
ml = self.model_building(x_train, station)
test = self.lm_data[str(Y_start):str(Y_end)]
p = ml.predict(test)
p = pd.DataFrame(p)
forecast.append(np.array(np.exp(p[0])))
x_test.append(np.array(test[station + '_Temp']))
end = time.time()
this_date = this_date + datetime.timedelta(hours=24)
result_mape = []
result_rmse = []
for index in range(len(forecast)):
result_mape.append(helper.mape(np.array(x_test[index]), np.array(forecast[index])))
result_rmse.append(helper.rmse(np.array(x_test[index]), np.array(forecast[index])))
self.mape = np.mean(result_mape)
self.rmse = np.mean(result_rmse)
return self.mape, self.rmse
def predict_next_40hours_temp(self, station):
today = pd.to_datetime(self.date) + datetime.timedelta(hours=7)
train_end_date = today - datetime.timedelta(hours=1)
x_train = self.lm_data['2014-01-03 01:00':str(train_end_date)]
# print('building the latest model')
ml = self.model_building(x_train, station)
# print('building process complete')
Y_start, Y_end = today + datetime.timedelta(hours=1), today + datetime.timedelta(hours=40)
# print(f'Y_start {Y_start}, Y_end: {Y_end}')
X = self.lm_data[str(Y_start):str(Y_end)]
p = ml.predict(X)
p = pd.DataFrame(p)
p = np.exp(p[0])
# print('with time stamp: ', p)
self.forecast = p.tolist()
return self.forecast
if __name__ == '__main__':
path = '../../Data/Hourly_Temp_Humi_Load-6.csv'
df = pd.read_csv(path)
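# Illustrative continuation (a sketch, not part of the original file): build a DR model
# from the loaded data and produce a 40-hour temperature forecast. The station name 'RIV'
# is taken from the regression formula above; the date is made up.
dr = DR(df)
dr.set_date('2019-08-01')
mape, rmse = dr.model_selection_mape_rmse('RIV')
print('MAPE: {:.2f} RMSE: {:.2f}'.format(mape, rmse))
print(dr.predict_next_40hours_temp('RIV'))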
import json, os, sys
import pandas as pd
from urllib.request import urlopen
from xml.dom import minidom
from json import load
from pandas.io.json import json_normalize
def filterIQM(apidf, filter_list):
""" Loads the API info and filters based on user-provided
parameters. Filter parameters should be a list of strings
and string formats should be "(VAR) (Operator) (Value)".
Example: ['TR == 3.0'] or ['TR > 1.0','FD < .3']
Note: Each element in each string is SPACE separated!
Args:
apidf (pandas dataframe): Pandas df of the API info.
filter_list (list): List of argument strings that will
be joined by ampersands to use the pandas query function.
Returns: A pandas dataframe containing data pulled from
the MRIQC API, but filtered to contain only your match
specifications.
"""
cols = apidf.columns
cols = cols.map(lambda x: x.replace(".", "_"))
apidf.columns = cols
## FOR LATER: ##
# CONTROL WHICH EXPECTED VARIABLE LIST YOU CHECK DEPENDING
# ON THE MODALITY TYPE. (This will be useless if it's a checkbox
# or a pull down in a web interface...)
# bold_filters = {'SNR':'snr','TSNR':'tsnr',
# 'DVAR':'dvars_nstd','FD':'fd_mean',
# 'FWHM':'fwhm_avg','Tesla':'bids_meta_MagneticFieldStrength',
# 'gsr_x':'gsr_x','gsr_y':'gsr_y',
# 'TE':'bids_meta_EchoTime','TR':'bids_meta_RepetitionTime'}
# t1_filters = {'SNR_TOTAL':'snr_total',
# 'SNR_GM':'snr_gm',
# 'SNR_WM':'snr_wm',
# 'SNR_CSF':'snr_csf',
# 'CNR':'cnr',
# 'EFC':'efc',
# 'FWHM':'fwhm_avg',
# 'TE':'bids_meta_EchoTime',
# 'TR':'bids_meta_RepetitionTime',
# 'Tesla':'bids_meta_MagneticFieldStrength'
# }
# t2_filters = {
# 'SNR_TOTAL':'snr_total',
# 'SNR_GM':'snr_gm',
# 'SNR_WM':'snr_wm',
# 'SNR_CSF':'snr_csf',
# 'CNR':'cnr',
# 'EFC':'efc'
# }
query = []
expected_filters = {'SNR':'snr','TSNR':'tsnr',
'SNR_TOTAL':'snr_total','SNR_GM':'snr_gm',
'SNR_WM':'snr_wm','SNR_CSF':'snr_csf',
'CNR':'cnr','EFC':'efc',
'DVAR':'dvars_nstd','FD':'fd_mean',
'FWHM':'fwhm_avg','Tesla':'bids_meta_MagneticFieldStrength',
'gsr_x':'gsr_x','gsr_y':'gsr_y',
'TE':'bids_meta_EchoTime','TR':'bids_meta_RepetitionTime'
}
filter_check = list(expected_filters.keys())
for filt in filter_list:
var = filt.split(' ')[0]
op = filt.split(' ')[1]
val = filt.split(' ')[2]
if var in filter_check:
filt_str = expected_filters[var] + op + val
query.append(filt_str)
filtered_df = apidf.query(' & '.join(query))
return filtered_df
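# Usage sketch for filterIQM (the dataframe below is made up; real API data would come
# from the MRIQC web API and contain many more columns):
# apidf = pd.DataFrame({'bids_meta.RepetitionTime': [2.0, 3.0], 'fd.mean': [0.12, 0.45]})
# filterIQM(apidf, ['TR == 3.0', 'FD < .5']) # keeps only the row with TR 3.0 and FD 0.45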
# Functions are in alphabetical order, because lazy! ##
def load_groupfile(infile_path):
""" Load your MRIQC group tsv file and return a pandas df to then
use for visualizations or any other functions down the line.
Args:
infile_path (string): Path to your MRIQC tsv that you got
from running MRIQC on your LOCAL group. However, this can
be used to load any other downloaded/shared tsv for future
integration
Returns: A pandas dataframe of your tsv file that was output by
MRIQC. (This can also be tsv files shared or downloaded, such
as the ABIDE example tsv available online).
"""
name, ext = os.path.splitext(os.path.basename(infile_path))
if ext == '.tsv':
df = pd.read_table(infile_path, header=0)
elif ext == '.csv':
df = pd.read_csv(infile_path, header=0)
else:
raise ValueError("File type not supported: " + ext)
return df
def merge_dfs(userdf, filtered_apidf):
""" Merges the user/group dataframe and the filtered API dataframe
while adding a groupby variable. Name is "SOURCE". User entries
are "USER" and API entries are "API".
Args:
userdf (pandas df): User MRIQC tsv converted to pandas dataframe
filtered_apidf (pandas df): API info, filtered and stored in a pandas
dataframe.
Returns: A merged pandas dataframe containing the user group info and
the filtered API info. A "groupby" header called "SOURCE" is added
with a "USER" or "API" entry for easy sorting/splitting.
"""
userdf['SOURCE']='USER'
filtered_apidf['SOURCE']='API'
filtered_apidf.rename(columns={'_id': 'bids_name'}, inplace=True)
merged_df = pd.concat([userdf,filtered_apidf], sort=True)
return merged_df
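# Usage sketch (file name and filters are made up; apidf is assumed to hold the API info,
# the code that downloads it is not shown in this excerpt):
# groupdf = load_groupfile('group_bold.tsv')
# merged = merge_dfs(groupdf, filterIQM(apidf, ['TR == 2.0']))
# merged.groupby('SOURCE').size() # rows contributed by 'USER' vs. 'API'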
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits are False by default; RefPerc is True by default
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
from copy import deepcopy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftSize: the difference between 2 indices for dValue and dt
windowSize: size for the rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
windowSize must be an even number
for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
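# Usage sketch (column name 'Value' and the 1-second index are made up): a linear ramp
# sampled once per second yields a constant derivative of 1.0 per second.
# df = pd.DataFrame({'Value': np.arange(120.)},
#                   index=pd.date_range('2021-01-01', periods=120, freq='S'))
# mDf = getDerivative(df, 'Value', shiftSize=1, windowSize=60)
# mDf[['dt', 'dValue', 'dValueDt', 'dValueDtFiltered']].head()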
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
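# Usage sketch (node names are made up): the CVD name is split on '~' (or '°') and the
# node names are stripped.
# >>> fCVDNodesFromName('K0001 ~ K0002°K0003')
# ['K0001', 'K0002', 'K0003']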
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns the max. pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
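# Quick illustration of the sliding window documented above:
# >>> list(pairwise([1, 2, 3, 4]))
# [(1, 2), (2, 3), (3, 4)]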
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
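# Usage sketch (times are made up): an int timeEnd is interpreted as the number of
# desired sections after timeStart, so this yields two consecutive 12-minute sections.
# >>> genTimespans(pd.Timestamp('2021-03-19 01:00'), 2)
# [(Timestamp('2021-03-19 01:00:00'), Timestamp('2021-03-19 01:12:00')), (Timestamp('2021-03-19 01:12:00'), Timestamp('2021-03-19 01:24:00'))]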
def gen2Timespans(
timeStart # start of a "process"
,timeEnd # end of a "process"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) and timeEnd likewise
):
"""
generates 2 time ranges of equal length
1 around timeStart
1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round and return as int if denominator is specified; otherwise td is rounded to 2 decimals
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
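# Usage sketch (timestamps are made up): total duration of two disjoint intervals,
# expressed in minutes.
# >>> t = pd.Timestamp('2021-03-19 01:00')
# >>> pairs = [(t, t + pd.Timedelta('3 minutes')), (t + pd.Timedelta('10 minutes'), t + pd.Timedelta('12 minutes'))]
# >>> fTotalTimeFromPairs(pairs, denominator=pd.Timedelta('1 minute'))
# 5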
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# pairwise over all rows
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# if row 1 is not x and row 2 is x: tEin=t2 ("switches on")
if not row1Value and row2Value:
tEin=i2
# if row 1 is x and row 2 is not x: tAus=t2 ("switches off")
elif row1Value and not row2Value:
if tEin != None:
# store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # otherwise: the condition is now off and was never on
# the condition can only switch on in the first case
# if row 1 is x and row 2 is x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# in the first value pair the range is already on
tEin=i1
# last pair
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
# if fct:
# find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; True solitaires are not lost but are returned as a pair (t,t)
# True solitaires are ONLY included if s contains just 1 value and that value is True; the single returned pair then contains the solitaire timestamp for both times
# tdAllowed can additionally be specified
# afterwards the ranges are merged into time ranges that are no more than tdAllowed apart; these merged time ranges are then returned
# if fct is None:
# tdAllowed must be specified
# split into time ranges that are no more than the threshold tdAllowed apart; these time ranges are returned
# in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times to achieve this
# because no time range contained in s shall be lost
# if s contains only 1 value, 1 time pair with the same timestamp for both times is returned, provided the value is not Null
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # switches off - no pair
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # switches on - no pair
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # switches off (E) - no pair
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # switches on (E) - no pair
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # stays on - one pair
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # stays off - no pair
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 value
>>> d = {t1: 46} # 1 value - True
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 value - False
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 value - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 pair
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 pair
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 value
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 pair
>>> # 0 46 !1 pair
>>> # 0 0 !1 pair
>>> # 46 46 !1 pair
>>> # 46 0 46 0 !2 pairs
>>> # 46 0 0 46 !2 pairs
>>> # 0 46 0 46 !2 pairs
>>> # 0 46 46 0 !2 pairs
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 value
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 pair with identical times if the single element is True
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 pair with identical times if the single element is not None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# pairwise over all times
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# if 1 is not x and 2 is x: tEin=t2 ("switches on")
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# "switches on" already at the first pair
pass
# if 1 is x and 2 is not x: tAus=t2 ("switches off")
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singular event
# pair with identical times
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # switches off without having been on
if idx > 0: # Info
pass
else:
# in the first pair
pass
# if 1 is x and 2 is x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# in the first value pair the range is already on
tEin=i1
# handle the last pair
# if still on at the end of the Series: store the pair
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# handle tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# pairwise over all times
# start a new pair
anzInPair=1 # number of times in the current time span
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # time between 2 times > threshold: the time span is complete
if tEin==None:
# the first pair is already more than the threshold apart
# closing the time span is ignored, because otherwise the time span would contain only 1 value
# the current time span starts at the 1st value and spans the threshold
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# close the time span
tPair=(tEin,i1)
tPairs.append(tPair)
# start a new time span
tEin=i2
anzInPair=1
else:
# closing the time span is ignored, because otherwise the time span would contain only 1 value
anzInPair=2
else: # time span allowed, continue ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# handle the last time pair
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# a last value would be left over, so extend the last time span ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # merge the following pair into the previous pair
tPairs.remove(tp2) # remove the following pair
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # recursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
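# Usage sketch (timestamps are made up): two intervals that are only one second apart
# are combined into a single interval.
# >>> t = pd.Timestamp('2021-03-19 01:02:00')
# >>> fCombineSubsequenttPairs([(t, t + pd.Timedelta('2 seconds')), (t + pd.Timedelta('3 seconds'), t + pd.Timedelta('5 seconds'))])
# [(Timestamp('2021-03-19 01:02:00'), Timestamp('2021-03-19 01:02:05'))]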
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameter und Funktionen LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # same color from here on
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # same color from here on
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated are: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command or which state)
# the commands and states (the values of colRegExEventID) must be defined below in order to define the marker of the command or state
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value for commands (==> eventCCmds)
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
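# Usage sketch (SEG name taken from the examples elsewhere in this module): the first and
# third underscore-separated groups form the DIVPipelineName.
# >>> fDIVNameFromSEGName('6_AAD_41_OHV1')
# '6_41'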
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and pressure results: any result PV of a vector yields the base valid for all result PVs of that vector
i.e. the result PVs of a vector differ only in their suffix
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df with ODI parameterization data
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # only return ergIDs for which patternPat matches
):
"""
returns a string
of IDs from dfODI, separated by strSep, which contain baseID (and, if pattern is True, match patternPat)
baseID (and group(0) of patternPat if pattern is True) is removed from the IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences), with edge and node data as well as parameterization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; key in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- read in the model
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- determine segments
# --- from the model
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # pattern Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # pattern of the flow-direction PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- defined only via LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- combine
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# add the node names of the SEG definition
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# thin out
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sort
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- determine the segment edge sequences
dfSegsNodeLst={} # for checking purposes only
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
def fResValidSeriesSTAT_S(x): # STAT_S
if pd.isnull(x)==False:
if x >=0:
return True
else:
return False
else:
return False
def fResValidSeriesSTAT_S601(x): # STAT_S
if pd.isnull(x)==False:
if x==601:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S(x,value=20): # AL_S
if pd.isnull(x)==False:
if x==value:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S10(x):
return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
return fResValidSeriesAL_S(x,value=3)
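# Usage sketch (illustrative only): the predicates above are applied value-wise by
# findAllTimeIntervallsSeries() to turn a result channel into a list of time pairs.
# The series below and its values are assumptions for illustration:
#
#   sAL=pd.Series([0,20,20,0],index=pd.date_range('2021-01-01 00:00',periods=4,freq='T'))
#   tPairs=findAllTimeIntervallsSeries(s=sAL,fct=fResValidSeriesAL_S,tdAllowed=pd.Timedelta('1 second'))
#   # tPairs should contain one pair covering 00:01 to 00:02 (the span where AL_S == 20)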
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
# (fast) alle verfuegbaren Erg-Kanaele
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # Schieberfarben
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
# alle Basisfarben außer y gelb
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # Schiebersymbole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
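# Usage sketch (illustrative; the ResIDBase, h5 file and time window are assumptions,
# the ResIDBase example is taken from the docstring above):
#
#   lx=Lx.AppLog(h5File='a.h5')
#   dfSegReprVec=getLDSResVecDf(
#        ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
#       ,LDSResBaseType='SEG'
#       ,lx=lx
#       ,timeStart=pd.Timestamp('2021-01-01 00:00:00')
#       ,timeEnd=pd.Timestamp('2021-01-02 00:00:00')
#   )
#   # one column per result channel, e.g. dfSegReprVec['AL_S'], dfSegReprVec['STAT_S'], ...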
def fGetResTimes(
ResIDBases=[] # Liste der Wortstaemme der Ergebnisvektoren
,df=pd.DataFrame() # TCsLDSRes...
,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # Liste der Ergebnisvektoren Postfixe
,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # Liste der Ergebnisvektoren Funktionen
,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # Liste der key-Namen der Ergebnisse
,tdAllowed=pd.Timedelta('1 second') # erlaubte Zeitspanne zwischen geht und kommt (die beiden an diese Zeitspanne angrenzenden Zeitbereiche werden als 1 Zeit gewertet)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
Value: Liste mit Zeitpaaren (oder leere Liste)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
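# Shape of the returned structure (sketch, following the docstring above):
#
#   { '<ResIDBase>': { 'Zustaendig': [(tA,tE),...]
#                     ,'Alarm'     : [(tA,tE),...]
#                     ,'Stoerung'  : [(tA,tE),...] }
#    ,... }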
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
if timeShiftPair != None:
    (period,freq)=timeShiftPair
    timeDeltaStr="{:d} {:s}".format(period,freq)
    timeDelta=pd.Timedelta(timeDeltaStr)
else:
    timeDelta=pd.Timedelta('0 Seconds')
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
# Alarm geht ... Alarm kommt (wieder): wenn Zeitspanne ... <= tdAllowed, dann wird dies _gewertet als dieselbe Alarmzeitspanne
# d.h. es handelt sich _gewertet inhaltlich um denselben Alarm
# None zählt die Alarme strikt getrennt
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.<KEY>.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
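# Note (sketch): tdAllowed controls how alarm intervals are counted, e.g.
#
#   SEGResDct,DruckResDct=processAlarmStatistikData(TCsLDSRes1,TCsLDSRes2,dfSegsNodesNDataDpkt
#                                                  ,tdAllowed=pd.Timedelta('1 second'))
#
# merges two alarms of the same ID whose gap is <= 1 second into one alarm interval,
# while tdAllowed=None counts them strictly separately (see the parameter comment above).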
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
sortiert; direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen sind zusammengefasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
voneinander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
SEGDruckResDct={}
# merken, ob eine ID bereits bei einem SEG gezählt wurde; die Alarme einer ID sollen nur bei einem SEG gezaehlt werden
IDBereitsGezaehlt={}
# über alle DruckErgs
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
# voneinander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,SEGResDct={}
,DruckResDct={}
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR']
,NrAsc=[False]+4*[True]
):
"""
Returns dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmStatistik=pd.DataFrame()
dfAlarmEreignisse=pd.DataFrame()
try:
# Ereignisse
dfAlarmEreignisse=buildDfAlarmEreignisse(
SEGResDct=SEGResDct
,DruckResDct=DruckResDct
,TCsLDSRes1=TCsLDSRes1
,TCsLDSRes2=TCsLDSRes2
,dfCVDataOnly=dfCVDataOnly
,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
,replaceTup=replaceTup
,NrBy=NrBy
,NrAsc=NrAsc
)
# in dfAlarmEreignisse erzeugte Alarm-Nr. an Dct merken
SEGResDct,DruckResDct=addNrToAlarmStatistikData(
SEGResDct
,DruckResDct
,dfAlarmEreignisse
)
# BZKat der Alarme
def fGetAlarmKat(row):
"""
"""
# baseID des Alarms
baseID=row['OrteIDs'][0]
# dct des Alarms
if row['LDSResBaseType']=='SEG':
dct=SEGResDct[baseID]
else:
dct=DruckResDct[baseID]
# Nrn der baseID
Nrn=dct['AL_S_NR']
# idx dieses Alarms innerhalb der Alarme der baseID
idxAl=Nrn.index(row['Nr'])
# Zustaende dieses alarms
SB_S=dct['AL_S_SB_S'][idxAl]
kat=''
if 3 in SB_S:
kat='instationär'
else:
if 2 in SB_S:
kat = 'schw. instationär'
else:
if 1 in SB_S:
kat = 'stat. Fluss'
elif 4 in SB_S:
kat = 'stat. Ruhe'
return kat
dfAlarmEreignisse['BZKat']=dfAlarmEreignisse.apply(lambda row: fGetAlarmKat(row),axis=1)
# Segment-verdichtete Druckergebnisse
SEGDruckResDct=processAlarmStatistikData2(
DruckResDct
,TCsLDSRes2
,dfSegsNodesNDataDpkt
)
# Alarmstatistik bilden
dfAlarmStatistik=dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]
dfAlarmStatistik=dfAlarmStatistik[['DIVPipelineName','SEGName','SEGNodes','SEGResIDBase']].drop_duplicates(keep='first').reset_index(drop=True)
dfAlarmStatistik['Nr']=dfAlarmStatistik.apply(lambda row: "{:2d}".format(int(row.name)),axis=1)
# SEG
dfAlarmStatistik['FörderZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Zustaendig'])
dfAlarmStatistik['FörderZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Alarm'])
dfAlarmStatistik['FörderZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Stoerung'])
dfAlarmStatistik['FörderZeitenAlAnz']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['FörderZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_SB_S'])
dfAlarmStatistik['FörderZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_NR'])
# Druck (SEG-verdichtet)
dfAlarmStatistik['RuheZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Zustaendig'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Alarm'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Stoerung'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_SB_S'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_NR'] if x in SEGDruckResDct.keys() else [])
#dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAlNrn'].apply(lambda x: len(x))
# je 3 Zeiten bearbeitet
dfAlarmStatistik['FörderZeit']=dfAlarmStatistik['FörderZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeit']=dfAlarmStatistik['RuheZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitAl']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitAl']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitSt']=dfAlarmStatistik['FörderZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitSt']=dfAlarmStatistik['RuheZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse, dfAlarmStatistik,SEGDruckResDct
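# Typical call sequence (sketch; the h5 file name is an assumption):
#
#   TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(h5File='a.h5'
#                                                           ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt)
#   SEGResDct,DruckResDct=processAlarmStatistikData(TCsLDSRes1,TCsLDSRes2,dfSegsNodesNDataDpkt)
#   dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct=buildAlarmDataframes(
#        TCsLDSRes1=TCsLDSRes1,TCsLDSRes2=TCsLDSRes2
#       ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt,dfCVDataOnly=dfCVDataOnly
#       ,SEGResDct=SEGResDct,DruckResDct=DruckResDct)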
def plotDfAlarmStatistik(
dfAlarmStatistik=pd.DataFrame()
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=dfAlarmStatistik[[
'Nr'
,'DIVPipelineName'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'FörderZeitSt'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
,'RuheZeitSt'
]].copy()
# diese Zeiten um (Störzeiten) annotieren
df['FörderZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['FörderZeit'],row['FörderZeitSt']) if row['FörderZeitSt'] > 0. else row['FörderZeit'] ,axis=1)
df['RuheZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['RuheZeit'],row['RuheZeitSt']) if row['RuheZeitSt'] > 0. else row['RuheZeit'],axis=1)
# LfdNr. annotieren
df['LfdNr']=df.apply(lambda row: "{:2d} - {:s}".format(int(row.Nr)+1,str(row.DIVPipelineName)),axis=1)
# Zeiten Alarm um Alarm-Nrn annotieren
def fAddZeitMitNrn(zeit,lAlNr):
if len(lAlNr) > 0:
if len(lAlNr) <= 3:
return "{!s:s} (Nrn.: {!s:s})".format(zeit,lAlNr)
else:
# mehr als 3 Alarme...
return "{!s:s} (Nrn.: {!s:s}, ...)".format(zeit,lAlNr[0])
else:
return zeit
df['FörderZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['FörderZeitAl'],row['FörderZeitenAlNrn']),axis=1)
df['RuheZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['RuheZeitAl'],row['RuheZeitenAlNrn']),axis=1)
df=df[[
'LfdNr'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
]]
try:
t=plt.table(cellText=df.values, colLabels=df.columns, loc='center')
cols=df.columns.to_list()
colIdxLfdNr=cols.index('LfdNr')
colIdxFoerderZeit=cols.index('FörderZeit')
colIdxFoerderZeitenAlAnz=cols.index('FörderZeitenAlAnz')
colIdxFoerderZeitAl=cols.index('FörderZeitAl')
colIdxRuheZeit=cols.index('RuheZeit')
colIdxRuheZeitenAlAnz=cols.index('RuheZeitenAlAnz')
colIdxRuheZeitAl=cols.index('RuheZeitAl')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer die Ueberschriftszeile; col beginnt mit 0
if row == 0:
if col in [colIdxRuheZeit,colIdxRuheZeitenAlAnz,colIdxRuheZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='plum')
elif col in [colIdxFoerderZeit,colIdxFoerderZeitenAlAnz,colIdxFoerderZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='lightsteelblue')
if col == colIdxLfdNr:
if row==0:
continue
if 'color' in dfAlarmStatistik.columns.to_list():
color=dfAlarmStatistik['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
if col == colIdxFoerderZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'FörderZeitSt']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxFoerderZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Förderzeit
if df.loc[row-1,'FörderZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # palegoldenrod
#if df.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
if col == colIdxRuheZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'RuheZeitSt']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxRuheZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Ruhezeit
if df.loc[row-1,'RuheZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
pass
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # # palegoldenrod
#if df.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
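# Usage sketch (illustrative; figure size/dpi reuse the module-level defaults used elsewhere):
#
#   fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
#   t=plotDfAlarmStatistik(dfAlarmStatistik=dfAlarmStatistik)
#   fig.tight_layout(pad=2.)
#   fig.savefig('AlarmStatistik.pdf')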
def fOrteStripped(LDSResBaseType,OrteIDs):
"""
returns Orte stripped
"""
if LDSResBaseType == 'SEG': # 'Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.']
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_'+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
elif LDSResBaseType == 'Druck': # Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
else:
return None
def fCVDTime(row,dfSEG,dfDruck,replaceTup=('2021-','')):
"""
in:
dfSEG/dfDruck: TCsLDSRes1/TCsLDSRes2
row: Zeile aus dfAlarmEreignisse
von row verwendet:
LDSResBaseType: SEG (dfSEG) oder nicht (dfDruck)
OrteIDs: ==> ID von ZHKNR_S in dfSEG/dfDruck
ZHKNR: ZHKNR
returns:
string: xZeitA - ZeitEx
ZeitA: erste Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
ZeitE: letzte Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
xZeitA, wenn ZeitA die erste Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
xZeitE, wenn ZeitE die letzte Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
in Zeit wurde replaceTup angewendet
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
Time=""
ID=row['OrteIDs'][0]+'ZHKNR_S'
ZHKNR=row['ZHKNR']
if row['LDSResBaseType']=='SEG':
df=dfSEG
else:
df=dfDruck
s=df[df[ID]==ZHKNR][ID] # eine Spalte; Zeilen in denen ZHKNR_S den Wert von ZHKNR trägt
tA=s.index[0] # 1. Zeit
tE=s.index[-1] # letzte Zeit
Time=" {!s:s} - {!s:s} ".format(tA,tE)
try:
if tA==df[ID].dropna().index[0]:
Time='x'+Time.lstrip()
except:
logger.debug("{0:s}Time: {1:s}: x-tA Annotation Fehler; keine Annotation".format(logStr,Time))
try:
if tE==df[ID].dropna().index[-1]:
Time=Time.rstrip()+'x'
except:
logger.debug("{0:s}Time: {1:s}: x-tE Annotation Fehler; keine Annotation".format(logStr,Time))
Time=Time.replace(replaceTup[0],replaceTup[1])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return Time
def buildDfAlarmEreignisse(
SEGResDct={}
,DruckResDct={}
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] # Sortierspalten für die Nr. der Ereignisse
,NrAsc=[False]+4*[True] # aufsteigend j/n für die o.g. Sortierspalten
):
"""
Returns dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
Voralarm: ermittelter Voralarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
Type: Typ des Kontrollraums; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
###BZKat: Betriebszustandskategorie des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmEreignisse=pd.DataFrame()
try:
AlarmEvents=[] # Liste von AlarmEvent
AlarmEventsOrte={} # dct der Orte, die diesen (key) AlarmEvent melden
AlarmEventsZHKNRn={} # dct der ZHKNRn, die zu diesem (key) gehoeren
# über SEG- und Druck-Ergebnisvektoren
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for ResIDBase,dct in ResDct.items():
AL_S=dct['Alarm']
if len(AL_S) > 0:
# eine Erg-ID weist Alarme [(tA,tE),...] auf
# korrespondiernede Liste der ZHKs: [(999,1111),...]
ZHKNRnListen=dct['AL_S_ZHKNR_S']
ID=ResIDBase+'ZHKNR_S' # fuer nachfolgende Ausgabe
# ueber alle Alarme der Erg-ID
for idx,AL_S_Timepair in enumerate(AL_S):
(t1,t2)=AL_S_Timepair # tA, tE
ZHKNR_S_Lst=ZHKNRnListen[idx] # Liste der ZHKs in dieser Zeit
if len(ZHKNR_S_Lst) != 1:
logger.warning(("{:s}ID:\n\t {:s}: Alarm {:d} der ID\n\t Zeit von {!s:s} bis {!s:s}:\n\t Anzahl verschiedener ZHKNRn !=1: {:d} {:s}:\n\t ZHKNR eines Alarms wechselt waehrend eines Alarms. Alarm wird identifiziert mit 1. ZHKNR.".format(logStr,ID
,idx
,t1
,t2
,len(ZHKNR_S_Lst)
,str(ZHKNR_S_Lst)
)))
# die erste wird verwendet
ZHKNR=int(ZHKNR_S_Lst[0])
# AlarmEvent erzeugen
alarmEvent=AlarmEvent(t1,t2,ZHKNR,LDSResBaseType)
if alarmEvent not in AlarmEvents:
# diesen Alarm gibt es noch nicht in der Ereignisliste ...
AlarmEvents.append(alarmEvent)
AlarmEventsOrte[alarmEvent]=[]
AlarmEventsZHKNRn[alarmEvent]=[]
else:
pass
# Ort ergaenzen (derselbe Alarm wird erst ab V83.5.3 nur an einem Ort - dem lexikalisch kleinsten des Bilanzraumes - ausgegeben; zuvor konnte derselbe Alarm an mehreren Orten auftreten)
AlarmEventsOrte[alarmEvent].append(ResIDBase)
# ZHKNR(n) ergaenzen (ein Alarm wird unter 1 ZHKNR geführt)
AlarmEventsZHKNRn[alarmEvent].append(ZHKNR_S_Lst)
# df erzeugen
dfAlarmEreignisse=pd.DataFrame.from_records(
[alarmEvent for alarmEvent in AlarmEvents],
columns=AlarmEvent._fields
)
# Liste der EventOrte erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
l.append(AlarmEventsOrte[alarmEvent])
dfAlarmEreignisse['OrteIDs']=l
# abgekuerzte Orte
dfAlarmEreignisse['Orte']=dfAlarmEreignisse.apply(lambda row: fOrteStripped(row.LDSResBaseType,row.OrteIDs),axis=1)
dfAlarmEreignisse['Ort']=dfAlarmEreignisse['Orte'].apply(lambda x: x[0])
# Liste der ZHKNRn erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
lOfZl=AlarmEventsZHKNRn[alarmEvent]
lOfZ=[*{*chain.from_iterable(lOfZl)}]
lOfZ=sorted(pd.unique(lOfZ))
l.append(lOfZ)
dfAlarmEreignisse['ZHKNRn']=l
# Segmentname eines Ereignisses
dfAlarmEreignisse['SEGName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==row['OrteIDs'][0]]['SEGName'].iloc[0] if row['LDSResBaseType']=='SEG'
else [tuple for tuple in getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,row['OrteIDs'][0]) if not tuple[-1]][0][1],axis=1)
# DIVPipelineName eines Ereignisses
dfAlarmEreignisse['DIVPipelineName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DIVPipelineName'].iloc[0]
,axis=1)
# Alarm: ---
#tA: Anfangszeit
#tE: Endezeit
#ZHKNR: ZHKNR (1. bei mehreren Alarmen)
#LDSResBaseType: SEG oder Druck
# Orte: ---
#OrteIDs: OrteIDs des Alarms
#Orte: Kurzform von OrteIDs des Alarms
#ZHKNRn:
#SEGName: Segmentname
#DIVPipelineName
## Nr.
dfAlarmEreignisse.sort_values(by=NrBy,ascending=NrAsc,inplace=True)
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
#dfAlarmEreignisse['Nr']=dfAlarmEreignisse['Nr']+1
logger.debug("{0:s}{1:s}: {2:s}".format(logStr,'dfAlarmEreignisse',dfAlarmEreignisse.to_string()))
# Voralarm
VoralarmTypen=[]
for index, row in dfAlarmEreignisse.iterrows():
# zur Information bei Ausgaben
OrteIDs=row['OrteIDs']
OrtID=OrteIDs[0]
VoralarmTyp=None
try:
if row['LDSResBaseType']=='SEG':
VoralarmTyp=TCsLDSRes1.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
elif row['LDSResBaseType']=='Druck':
VoralarmTyp=TCsLDSRes2.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
except:
pass
if pd.isnull(VoralarmTyp): # == None: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=-1
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: kein (isnull) Voralarm gefunden?! (ggf. neutraler BRWechsel) - Voralarm gesetzt auf: {:d}".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA'],int(VoralarmTyp)))
if int(VoralarmTyp)==0: # == 0: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=0
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: Voralarm 0?! (ggf. war Bilanz in Stoerung)".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA']))
if int(VoralarmTyp) not in [-1,0,3,4,10]:
logger.warning("{:s}PV: {:s} Alarm Nr. {:d} ZHKNR {:d} tA {!s:s}: unbekannter Voralarm gefunden: {:d}".format(logStr,row['OrteIDs'][0],int(row['Nr']),row['ZHKNR'],row['tA'],int(VoralarmTyp)))
logger.debug("{:s}{:d} {!s:s} VoralarmTyp:{:d}".format(logStr,int(row['Nr']),row['tA'],int(VoralarmTyp)))
VoralarmTypen.append(VoralarmTyp)
dfAlarmEreignisse['Voralarm']=[int(x) for x in VoralarmTypen]
# Type (aus dfCVDataOnly) und Erzeugungszeit (aus dfCVDataOnly) und Name (aus dfCVDataOnly)
dfAlarmEreignisse['ZHKNR']=dfAlarmEreignisse['ZHKNR'].astype('int64')
dfAlarmEreignisse['ZHKNRStr']=dfAlarmEreignisse['ZHKNR'].astype('string')
dfCVDataOnly['ZHKNRStr']=dfCVDataOnly['ZHKNR'].astype('string')
# wg. aelteren App-Log Versionen in denen ZHKNR in dfCVDataOnly nicht ermittelt werden konnte
# Type,ScenTime,Name sind dann undefiniert
dfAlarmEreignisse=pd.merge(dfAlarmEreignisse,dfCVDataOnly,on='ZHKNRStr',suffixes=('','_CVD'),how='left').filter(items=dfAlarmEreignisse.columns.to_list()+['Type'
#,'ScenTime'
,'Name'])
dfAlarmEreignisse=dfAlarmEreignisse.drop(['ZHKNRStr'],axis=1)
dfAlarmEreignisse=dfAlarmEreignisse.fillna(value='')
# lfd. Nummern
dfAlarmEreignisse['NrSD']=dfAlarmEreignisse.groupby(['LDSResBaseType']).cumcount() + 1
dfAlarmEreignisse['NrName']=dfAlarmEreignisse.groupby(['Name']).cumcount() + 1
dfAlarmEreignisse['NrSEGName']=dfAlarmEreignisse.groupby(['SEGName']).cumcount() + 1
# Lebenszeit der ZHKNR
try:
dfAlarmEreignisse['tD_ZHKNR']=dfAlarmEreignisse.apply(lambda row: fCVDTime(row,TCsLDSRes1,TCsLDSRes2,replaceTup),axis=1)
except:
logger.debug("{:s}Spalte tD_ZHKNR (Lebenszeit einer ZHKNR) konnte nicht ermittelt werden. Vmtl. aeltere App-Log Version.".format(logStr))
dfAlarmEreignisse['tD_ZHKNR']='-1'
# Dauer des Alarms
dfAlarmEreignisse['tD']=dfAlarmEreignisse.apply(lambda row: row['tE']-row['tA'],axis=1)
dfAlarmEreignisse['tD']= dfAlarmEreignisse['tD'].apply(lambda x: "{!s:s}".format(x).replace('days','Tage').replace('0 Tage','').replace('Tage','T'))
# AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
dfAlarmEreignisse=dfAlarmEreignisse[['Nr','tA', 'tE','tD','ZHKNR','tD_ZHKNR','ZHKNRn','LDSResBaseType'
,'OrteIDs', 'Orte', 'Ort', 'SEGName','DIVPipelineName'
,'Voralarm', 'Type', 'Name'
,'NrSD', 'NrName', 'NrSEGName'
]]
dfAlarmEreignisse['AlarmEvent']=dfAlarmEreignisse.apply(lambda row: AlarmEvent(row['tA'],row['tE'],row['ZHKNR'],row['LDSResBaseType']),axis=1)
# unklar, warum erforderlich
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fCVDName(Name
):
"""
"""
lName=len(Name)
if len(Name)==0:
Name='ZHKName vmtl. nicht in Log'
lNameMaxH=20
if lName > 2*lNameMaxH:
Name=Name[:lNameMaxH-2]+'....'+Name[lName-lNameMaxH+2:]
Name=Name.replace('°','|')
return Name
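# Examples (deterministic, derived from the function above):
#   fCVDName('')      # -> 'ZHKName vmtl. nicht in Log'
#   fCVDName('A'*50)  # -> 'A'*18 + '....' + 'A'*18  (names longer than 40 chars are shortened)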
def plotDfAlarmEreignisse(
dfAlarmEreignisse=pd.DataFrame()
,sortBy=[]
,replaceTup=('2021-','')
,replaceTuptD=('0 days','')
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=dfAlarmEreignisse[['Nr','LDSResBaseType','Voralarm','Type','NrSD','tA','tE','tD','ZHKNR','Name','Orte','tD_ZHKNR','NrName','NrSEGName','SEGName','BZKat']].copy()
df['tA']=df['tA'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
df['tE']=df['tE'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
###df['Anz']=df['Orte'].apply(lambda x: len(x))
##df['Orte']=df['Orte'].apply(lambda x: str(x).replace('[','').replace(']','').replace("'",""))
df['Orte']=df['Orte'].apply(lambda x: str(x[0]))
df['LDSResBaseType']=df.apply(lambda row: "{:s} {:s} - {:d}".format(row['LDSResBaseType'],row['Type'],row['Voralarm']),axis=1)
df=df[['Nr','LDSResBaseType','NrSD','tA','tE','tD','ZHKNR','Name','NrName','NrSEGName','SEGName','tD_ZHKNR','Orte','BZKat']]
df.rename(columns={'LDSResBaseType':'ResTyp - Voralarm'},inplace=True)
df.rename(columns={'tD_ZHKNR':'ZHKZeit','Name':'ZHKName'},inplace=True)
###df['ZHKName']=df['ZHKName'].apply(lambda x: fCVDName(x))
####df['ZHKName']=df['Orte'].apply(lambda x: x[0])
df['NrSEGName (SEGName)']=df.apply(lambda row: "{!s:2s} ({!s:s})".format(row['NrSEGName'],row['SEGName']),axis=1)
df=df[['Nr','ResTyp - Voralarm','NrSD','tA','tD','ZHKNR'
,'Orte' #'ZHKName'
,'BZKat'
,'NrName','NrSEGName (SEGName)','ZHKZeit']]
df.rename(columns={'Orte':'ID'},inplace=True)
df['tD']=df['tD'].apply(lambda x: str(x).replace(replaceTuptD[0],replaceTuptD[1]))
def fGetZHKNRStr(row,dfOrig):
"""
returns:
ZHKNStr in Abhängigkeit der aktuellen Zeile und dfOrig
"""
s=dfOrig[dfOrig['Nr']==row['Nr']].iloc[0]
if len(s.ZHKNRn)>1:
if len(s.ZHKNRn)==2:
return "{:d} ({!s:s})".format(row['ZHKNR'],s.ZHKNRn[1:])
else:
return "{:d} (+{:d})".format(row['ZHKNR'],len(s.ZHKNRn)-1)
else:
return "{:d}".format(row['ZHKNR'])
df['ZHKNR']=df.apply(lambda row: fGetZHKNRStr(row,dfAlarmEreignisse),axis=1)
if sortBy!=[]:
df=df.sort_values(by=sortBy)
t=plt.table(cellText=df.values, colLabels=df.columns
,colWidths=[.03,.1 # Nr ResTyp-Voralarm
,.04 # NrSD
,.08,.08 # tA tD
,.085 # ZHKNR
,.1125,.07 #.1125 # ID BZKat
,.04 # NrName
,.14 # NrSEGName (SEGName)
,.2125] # ZHKZeit
, cellLoc='left'
, loc='center')
t.auto_set_font_size(False)
t.set_fontsize(10)
cols=df.columns.to_list()
#colIdxOrte=cols.index('Orte')
#colIdxName=cols.index('ZHKName')
colIdxNrSD=cols.index('NrSD')
colIdxNrSEG=cols.index('NrSEGName (SEGName)')
# ResTyp - Voralarm
colIdxResTypVA=cols.index('ResTyp - Voralarm')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer die Ueberschriftszeile; col beginnt mit 0
#if col == colIdxName:
# cellObj.set_text_props(ha='left')
if col == colIdxNrSD:
if row > 0:
if dfAlarmEreignisse.loc[row-1,'LDSResBaseType']=='SEG':
cellObj.set_text_props(backgroundcolor='lightsteelblue')
else:
cellObj.set_text_props(backgroundcolor='plum')
elif col == colIdxNrSEG:
if row==0:
continue
if 'color' in dfAlarmEreignisse.columns.to_list():
color=dfAlarmEreignisse['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
elif col == colIdxResTypVA and row > 0:
pass
if dfAlarmEreignisse.loc[row-1,'Voralarm'] in [10]:
cellObj.set_text_props(backgroundcolor='sandybrown')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [4]:
cellObj.set_text_props(backgroundcolor='pink')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [3]:
cellObj.set_text_props(backgroundcolor='lightcoral')
else:
pass
#cellObj.set_text_props(fontsize=16)
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
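# Usage sketch (illustrative; dfAlarmEreignisse as returned by buildAlarmDataframes, i.e. incl. BZKat):
#
#   fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
#   t=plotDfAlarmEreignisse(dfAlarmEreignisse=dfAlarmEreignisse,sortBy=['Nr'])
#   fig.savefig('AlarmEreignisse.pdf')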
def plotDfAlarmStatistikReportsSEGErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,SEGResDct={}
,timeStart=None,timeEnd=None
,SEGErgsFile='SEGErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H' # Runden (1 Stunde)
,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with FörderZeitenAlAnz>0
1 Base Plot and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr)
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
logger.debug("{0:s}timeStart (ohne timeShift): {1:s} timeEnd (ohne timeShift): {2:s}".format(logStr,str(timeStart),str(timeEnd)))
xlimsDct={}
pdf=PdfPages(SEGErgsFile)
(fileNameBase,ext)= os.path.splitext(SEGErgsFile)
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,row['SEGResIDBase'])
if row['FörderZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz: 0".format(titleStr))
continue # keine SEGs ohne Alarme drucken
# Erg lesen
ResIDBase=row['SEGResIDBase']
dfSegReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
ID='AL_S'
if ID not in dfSegReprVec.keys():
continue
idxSEGPlotted=idxSEGPlotted+1
xlimsDct[ResIDBase]=[]
logger.debug("{:s}ResIDBase: {:s} dfSegReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfSegReprVec.columns.to_list()))
# Plot Basis ###########################################################
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
,plotLegend=True
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
if row['FörderZeitenAlAnz'] > 0:
if row['FörderZeitenAlAnz'] <= 3:
txtNr=" Nrn.: {!s:s}".format(row['FörderZeitenAlNrn'])
else:
txtNr=" Nrn.: {!s:s} u.w.".format(row['FörderZeitenAlNrn'][0])
txt=txt+txtNr
else:
txtNr=''
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,txtNr.replace('Nrn.: ','Nrn ').replace(',','').replace('[','').replace(']','').replace('u.w.','u w'))
plt.savefig(fileName)
plt.show()
###plt.clf()
plt.close()
# Plot Alarme ###########################################################
dct=SEGResDct[row['SEGResIDBase']]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['FörderZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
# wenn AlarmRand - PlotRand < 3 Minuten: um 3 Minuten erweitern
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
txtNr=" Nr.: {!s:s}".format(AlNr)
txt=txt+txtNr
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
#logger.info("{:s}".format(titleStr))
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
#(fileName,ext)= os.path.splitext(SEGErgsFile)
fileNameAlarm="{:s} {:s}.png".format(fileName.replace('.png','')
,txtNr.replace('Nr.: ','Nr ').replace(',','').replace('[','').replace(']',''))
plt.savefig(fileNameAlarm)
plt.show()
###plt.clf()
plt.close()
###plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
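# Usage sketch (illustrative; file names are assumptions):
#
#   xlimsDct=plotDfAlarmStatistikReportsSEGErgs(
#        h5File='a.h5'
#       ,dfAlarmStatistik=dfAlarmStatistik
#       ,SEGResDct=SEGResDct
#       ,SEGErgsFile='SEGErgs.pdf')
#   # xlimsDct maps each plotted SEGResIDBase to the detail-plot time windows of its alarms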
def plotDfAlarmStatistikReportsDruckErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,DruckResDct={}
,timeStart=None,timeEnd=None
,DruckErgsFile='DruckErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H'
,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with RuheZeitenAlAnz>0
1 Base Plot for a Druck with an Alarm and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
logger.debug("{0:s}firstTime (ohne TimeShift): {1:s} lastTime (ohne TimeShift): {2:s}".format(logStr,str(firstTime),str(lastTime)))
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr) # https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
logger.debug("{0:s}timeStart abgerundet (ohne TimeShift): {1:s} timeEnd aufgerundet (ohne TimeShift): {2:s} TimeShift: {3:s}".format(logStr
,str(timeStart)
,str(timeEnd)
,str(timeDelta)))
xlimsDct={}
pdf=PdfPages(DruckErgsFile)
(fileNameBase,ext)= os.path.splitext(DruckErgsFile)
# über alle Segmente der Alarmstatistik (die DruckIDs sollen in der Reihenfolge der Alarmstatistik abgearbeitet werden)
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if row['RuheZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("LfdNr {:2d} - {:s}: {:s}: RuheZeitenAlAnz: 0".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']))
continue # keine SEGs ohne Alarme drucken
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
idxSEGPlotted=idxSEGPlotted+1
# DruckIDs eines Segmentes
DruckIDs=sorted([ID for ID in dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DruckResIDBase'].unique() if not pd.isnull(ID)])
for idxDruckID,DruckResIDBase in enumerate(DruckIDs):
dct=DruckResDct[DruckResIDBase]
if len(dct['Alarm'])==0:
# nur DruckIDs mit Alarmen plotten
continue
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
# Erg lesen
ResIDBase=DruckResIDBase
dfDruckReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='Druck',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
logger.debug("{:s}ResIDBase: {:s} dfDruckReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfDruckReprVec.columns.to_list()))
logger.debug("{:s}ID: {:s}: timeStart (mit TimeShift): {:s} timeEnd (mit TimeShift): {:s}".format(logStr
,DruckResIDBase
,str(dfDruckReprVec.index[0])
,str(dfDruckReprVec.index[-1])
))
ID='AL_S'
if ID not in dfDruckReprVec.keys():
continue
xlimsDct[ResIDBase]=[]
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten))
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,fOrteStripped('Druck',[DruckResIDBase])[0]
)
plt.savefig(fileName)
plt.show()
pdf.savefig(fig)
plt.close()
# Plot Alarme ###########################################################
dct=DruckResDct[DruckResIDBase]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['RuheZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d} Nr. {:4d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten)
,AlNr)
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileNameAlarm="{:s} Nr {:d}.png".format(fileName.replace('.png',''),AlNr)
plt.savefig(fileNameAlarm)
plt.show()
plt.close()
#plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
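# --- Usage sketch --------------------------------------------------------------
# A minimal sketch (not executed at import time) of how the xlimsDct returned by
# plotDfAlarmStatistikReportsDruckErgs could be consumed; 'a.h5', 'DruckErgs.pdf'
# and the passed DataFrames/Dcts are placeholders supplied by the caller.
def _sketchUsagePlotDruckErgs(dfAlarmStatistik,dfSegsNodesNDataDpkt,DruckResDct,h5File='a.h5'):
    xlimsDct=plotDfAlarmStatistikReportsDruckErgs(
         h5File=h5File
        ,dfAlarmStatistik=dfAlarmStatistik
        ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
        ,DruckResDct=DruckResDct
        ,DruckErgsFile='DruckErgs.pdf'
    )
    for baseID,detailXlims in xlimsDct.items():
        # each value is a list of (timeStartDetail,timeEndDetail) pairs
        logger.debug("BaseID {:s}: {:d} detail window(s)".format(baseID,len(detailXlims)))
    return xlimsDct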
def plotTimespans(
xlims # list of sections
,orientation='landscape' # oben HYD unten LDS; 'portrait': # links HYD rechts LDS
,pad=3.5 # tight_layout() can take keyword arguments of pad, w_pad and h_pad. These control the extra padding around the figure border and between subplots. The pads are specified in fraction of fontsize.
,w_pad=0.5
,h_pad=0.5
# 'portrait' # links HYD rechts LDS
,rectSpalteLinks=[0, 0, 0.5, 1]
,rectSpalteRechts=[0.325, 0, 1, 1]
# 'landscape': # oben HYD unten LDS
,rectZeileOben=[0, .5, 1, 1]
,rectZeileUnten=[0, 0, 1, .5]
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,figTitle='' #!
,figSave=False #!
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,sectionTitlesLDS=None # list of section titles to be used
,sectionTextsLDS=None # list of section texts to be used
,vLinesX=[] # plotted in each HYD section if X-time fits
,hLinesY=[] # plotted in each HYD section
,vAreasX=[] # for each HYD section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXLDS=None # plotted in each LDS section if X-time fits
,vAreasXLDS=None # for each LDS section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
,vLinesXColorLDS=None
,vAreasXColorLDS=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# --- Args Fct. HYD ---:
,TCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,TCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,TCsOPCScenTimeShift=pd.Timedelta('1 hour')
,TCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,TCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,TCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,TCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={}
,pDct={}
,QDctOPC={}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={}
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
# --- Args Fct. LDS ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
,ylimAL=ylimALD
,yticksAL=yticksALD
,ylimR=ylimRD #can be a list #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False # can be a list #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD # can be a list of lists #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# ditto for acceleration (Beschl.)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
fig=plt.gcf()
if orientation=='landscape':
# oben HYD unten LDS
gsHYD = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.ncols)]
gsLDS = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.ncols)]
else:
# links HYD rechts LDS
gsHYD = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.nrows)]
gsLDS = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.nrows)]
pltLDSpQAndEventsResults=plotTimespansHYD(
axLst=axLstHYD
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitles
,sectionTexts=sectionTexts
,vLinesX=vLinesX
,hLinesY=hLinesY
,vAreasX=vAreasX
,vLinesXColor=vLinesXColor
,vAreasXColor=vAreasXColor
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfTCsLDSIn=TCsLDSIn
,dfTCsOPC=TCsOPC
,dfTCsOPCScenTimeShift=TCsOPCScenTimeShift
,dfTCsSIDEvents=TCsSIDEvents
,dfTCsSIDEventsTimeShift=TCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=TCsSIDEventsInXlimOnly
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
,yGridSteps=yGridSteps
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
)
if orientation=='landscape':
# oben HYD unten LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileOben)
else:
# links HYD rechts LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteLinks)
if sectionTitlesLDS==None:
sectionTitlesLDS=sectionTitles
if sectionTextsLDS==None:
sectionTextsLDS=sectionTexts
if vLinesXLDS==None:
vLinesXLDS=vLinesX
if vAreasXLDS==None:
vAreasXLDS=vAreasX
if vLinesXColorLDS==None:
vLinesXColorLDS=vLinesXColor
if vAreasXColorLDS==None:
vAreasXColorLDS=vAreasXColor
pltLDSErgVecResults=plotTimespansLDS(
axLst=axLstLDS
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitlesLDS
,sectionTexts=sectionTextsLDS
,vLinesX=vLinesXLDS
,vAreasX=vAreasXLDS
,vLinesXColor=vLinesXColorLDS
,vAreasXColor=vAreasXColorLDS
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,ylimR=ylimR
,ylimRxlim=ylimRxlim
,yticksR=yticksR
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
# wenn weniger als 5 Achsen geplottet werden stimmt der erste Wert von rectSpalteRechts nicht
#(axes,lines)=pltLDSErgVecResults[0]
#
# numOfYAxes=len(axes)
#corFac=5-numOfYAxes
#rectSpalteRechtsCor=rectSpalteRechts #[0.325, 0, 1, 1]
#rectSpalteRechtsCor[0]=rectSpalteRechtsCor[0]+0.06*corFac
if orientation=='landscape':
# oben HYD unten LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileUnten)
else:
# links HYD rechts LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteRechts)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults
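# --- Usage sketch --------------------------------------------------------------
# A minimal sketch (not executed at import time) of a plotTimespans call.
# plotTimespans draws into the current figure, so one has to exist beforehand;
# xlims, the TC-DataFrames, the result vectors and QDct/pDct are placeholders
# supplied by the caller.
def _sketchUsagePlotTimespans(xlims,TCsLDSIn,dfSegReprVec,dfDruckReprVec,QDct,pDct):
    fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
    gsHYD,gsLDS,resultsHYD,resultsLDS=plotTimespans(
         xlims
        ,orientation='landscape' # HYD on top, LDS below
        ,TCsLDSIn=TCsLDSIn
        ,QDct=QDct
        ,pDct=pDct
        ,dfSegReprVec=dfSegReprVec
        ,dfDruckReprVec=dfDruckReprVec
        ,figTitle='Timespans'
        ,figSave=False
    )
    return fig,resultsHYD,resultsLDS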
def plotTimespansHYD(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,hLinesY=[] # plotted in each section
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfTCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={ # Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSrc 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None#[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSpQAndEvents selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
# plots pltLDSpQAndEvents-Sections
# returns a Lst of pltLDSpQAndEvents-Results, a Lst of (axes,lines,scatters)
try:
if sectionTitles==[] or sectionTitles==None:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSpQAndEventsResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
(axes,lines,scatters)=pltLDSpQAndEvents(
ax
,dfTCsLDSIn=dfTCsLDSIn
,dfTCsOPC=dfTCsOPC
,dfTCsOPCScenTimeShift=dfTCsOPCScenTimeShift
,dfTCsSIDEvents=dfTCsSIDEvents
,dfTCsSIDEventsTimeShift=dfTCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=dfTCsSIDEventsInXlimOnly
,dfTCsSIDEventsyOffset=dfTCsSIDEventsyOffset
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,xlim=xlim
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
# 3. Achse
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
,yGridSteps=yGridSteps
,plotLegend=plotLegendFct
,baseColorsDef=baseColorsDef
)
pltLDSpQAndEventsResults.append((axes,lines,scatters))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
for hLineY in hLinesY:
ax.axhline(y=hLineY,xmin=0, xmax=1,color='gray',ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly:
legendHorizontalPos='center' # wenn nur 1x Legende dann Mitte
if plotLegend1stOnly and idx>0:
pass
else:
patterBCp='^p S(rc|nk)'
patterBCQ='^Q S(rc|nk)'
patterBCpQ='^[pQ] S(rc|nk)'
linesp=[line for line in lines if re.search(patterBCp,line) != None]
linesQ=[line for line in lines if re.search(patterBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patterBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSpQAndEventsResults
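# --- Usage sketch --------------------------------------------------------------
# A minimal sketch (not executed at import time): plotTimespansHYD expects one
# axis per section in axLst; the gridspec layout below and the passed
# DataFrame/Dcts are placeholders supplied by the caller.
def _sketchUsagePlotTimespansHYD(xlims,dfTCsLDSIn,QDct,pDct):
    fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
    gs=gridspec.GridSpec(1,len(xlims),figure=fig)
    axLst=[fig.add_subplot(gs[idx]) for idx in np.arange(gs.ncols)]
    results=plotTimespansHYD(
         axLst=axLst
        ,xlims=xlims
        ,dfTCsLDSIn=dfTCsLDSIn
        ,QDct=QDct
        ,pDct=pDct
    )
    return results # list of (axes,lines,scatters) per section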
def plotTimespansLDS(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
#,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD
,yTwinedAxesPosDeltaHPStart=-0.0125
,yTwinedAxesPosDeltaHP=-0.0875
,ylimR=ylimRD # can be a list
,ylimRxlim=False # can be a list
,yticksR=yticksRD # can be a list
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
# plots pltLDSErgVec-Sections
# returns a Lst of pltLDSErgVec-Results, a Lst of (axes,lines)
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if sectionTitles==[] or sectionTitles ==None:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSErgVecResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
ylimRIdx=ylimR
if isinstance(ylimR, list):
ylimRIdx=ylimR[idx]
ylimRxlimIdx=ylimRxlim
if isinstance(ylimRxlim, list):
ylimRxlimIdx=ylimRxlim[idx]
yticksRIdx=yticksR
if isinstance(yticksR, list):
if any(isinstance(el, list) for el in yticksR):
yticksRIdx=yticksR[idx]
(axes,lines)=pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,xlim=xlims[idx]
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,ylimAL=ylimAL
,yticksAL=yticksAL
,yTwinedAxesPosDeltaHPStart=yTwinedAxesPosDeltaHPStart
,yTwinedAxesPosDeltaHP=yTwinedAxesPosDeltaHP
,ylimR=ylimRIdx
,ylimRxlim=ylimRxlimIdx
,yticksR=yticksRIdx
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,ySpanMin=ySpanMin
,plotLegend=plotLegendFct
,legendLoc=legendLoc
,legendFramealpha=legendFramealpha
,legendFacecolor=legendFacecolor
,attrsDctLDS=attrsDctLDS
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,plotACCLimits=plotACCLimits
,highlightAreas=highlightAreas
,Seg_Highlight_Color=Seg_Highlight_Color
,Seg_Highlight_Alpha=Seg_Highlight_Alpha
,Seg_Highlight_Fct=Seg_Highlight_Fct
,Seg_HighlightError_Color=Seg_HighlightError_Color
,Seg_Highlight_Alpha_Error=Seg_Highlight_Alpha_Error #
,Seg_HighlightError_Fct=Seg_HighlightError_Fct
,Druck_Highlight_Color=Druck_Highlight_Color
,Druck_Highlight_Alpha=Druck_Highlight_Alpha
,Druck_Highlight_Fct=Druck_Highlight_Fct
,Druck_HighlightError_Color=Druck_HighlightError_Color
,Druck_Highlight_Alpha_Error=Druck_Highlight_Alpha_Error #
,Druck_HighlightError_Fct=Druck_HighlightError_Fct
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
pltLDSErgVecResults.append((axes,lines))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly and idx>0:
pass
else:
if not dfSegReprVec.empty:
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternSeg,line) != None])
,tuple([line for line in lines if re.search(patternSeg,line) != None])
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternDruck,line) != None])
,tuple([line for line in lines if re.search(patternDruck,line) != None])
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSErgVecResults
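# --- Usage sketch --------------------------------------------------------------
# A minimal sketch (not executed at import time): plotTimespansLDS plots the LDS
# result vectors per section; dfSegReprVec/dfDruckReprVec are placeholders
# supplied by the caller (either one may be an empty DataFrame).
def _sketchUsagePlotTimespansLDS(xlims,dfSegReprVec,dfDruckReprVec):
    fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
    gs=gridspec.GridSpec(len(xlims),1,figure=fig)
    axLst=[fig.add_subplot(gs[idx]) for idx in np.arange(gs.nrows)]
    results=plotTimespansLDS(
         axLst=axLst
        ,xlims=xlims
        ,dfSegReprVec=dfSegReprVec
        ,dfDruckReprVec=dfDruckReprVec
    )
    return results # list of (axes,lines) per section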
def pltLDSpQAndEvents(
ax
,dfTCsLDSIn # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorgenannten Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame()
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={# Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSnk 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True # plot RTTM-Echoes
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None #wenn undef., dann aus ylimQ
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,ylabel3rd='Schieber (ZUSTände 0,1,2 jew. + x; Befehle)'
,yGridSteps=30 # 0: das y-Gitter besteht dann bei ylimp=ylimQ=yticksp=yticksQ None nur aus min/max (also 1 Gitterabschnitt)
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
"""
zeichnet pq-Zeitkurven - ggf. ergaenzt durch Events
Returns:
* axes (Dct of axes)
* lines (Dct of lines)
* scatters (List of ax.scatter-Results)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
lines={}
scatters=[]
try:
axes['p']=ax
# x-Achse ----------------
if xlim == None:
xlimMin=dfTCsLDSIn.index[0]
xlimMax=dfTCsLDSIn.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}dfTCsOPCScenTimeShift: {1:s}".format(logStr,str(dfTCsOPCScenTimeShift)))
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# Eindeutigkeit der IDPlts pruefen
keys=[]
keysUneindeutig=[]
for dct in [QDct,pDct,QDctOPC,pDctOPC]:
for key, value in dct.items():
if IDPltKey in value.keys():
IDPltValue=value[IDPltKey]
if IDPltValue in keys:
print("IDPlt {:s} bereits vergeben".format(IDPltValue))
keysUneindeutig.append(IDPltValue)
else:
keys.append(IDPltValue)
# 1. Achse p -----------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p'))
for key, value in pDct.items(): # nur die konfigurierten IDs plotten
if key in dfTCsLDSIn.columns: # nur dann, wenn ID als Spalte enthalten
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=key # Spaltenname
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey=IDPltKey # Schluesselbezeichner in value
,IDPltValuePostfix=None
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('1 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys():
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p OPC'))
for key, value in pDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
ylimp,yticksp=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,pDct.keys()
,ylim=ylimp
,yticks=yticksp
,ylimxlim=ylimpxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax.set_ylim(ylimp)
ax.set_yticks(yticksp)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel(ylabelp)
# 2. y-Achse Q ----------------------------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q'))
ax2 = ax.twinx()
axes['Q']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
for key, value in QDct.items():
if key in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=key
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
# ,timeShift=pd.Timedelta('0 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys() and plotRTTM:
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q OPC'))
for key, value in QDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
pltLDSHelperY(ax2)
ylimQ,yticksQ=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,QDct.keys()
,ylim=ylimQ
,yticks=yticksQ
,ylimxlim=ylimQxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax2.set_ylim(ylimQ)
ax2.set_yticks(yticksQ)
ax2.grid()
ax2.set_ylabel(ylabelQ)
# ggf. 3. Achse
if not dfTCsSIDEvents.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 3. Achse SID'))
ax3 = ax.twinx()
axes['SID']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
if dfTCsSIDEventsInXlimOnly:
# auf xlim beschränken
dfTCsSIDEventsPlot=dfTCsSIDEvents[
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
# weiter beschränken auf die, die in xlim mind. 1 Eintrag haben
dfTCsSIDEventsPlot=dfTCsSIDEventsPlot.dropna(axis=1,how='all')
else:
dfTCsSIDEventsPlot=dfTCsSIDEvents
# doppelte bzw. mehrfache Spaltennamen eliminieren (das waere ein Aufruf-Fehler)
dfTCsSIDEventsPlot = dfTCsSIDEventsPlot.loc[:,~dfTCsSIDEventsPlot.columns.duplicated()]
logger.debug("{:s}dfTCsSIDEventsPlot.dropna(how='all'): {:s}".format(logStr,dfTCsSIDEventsPlot.dropna(how='all').to_string()))
if not dfTCsSIDEventsPlot.dropna(how='all').empty: # mind. 1 Ereignis in irgendeiner Spalte muss ueberbleiben
# aus Performanzgruenden wird nur zum Plot gegeben, was in xlim auch zu sehen sein wird
dfTCsSIDEventsPlot2=dfTCsSIDEventsPlot[
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
labelsOneCall,scattersOneCall=pltLDSSIDHelper(
ax3
,dfTCsSIDEventsPlot2
,dfTCsSIDEventsTimeShift
,dfTCsSIDEventsyOffset
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
)
scatters=scatters+scattersOneCall
pltLDSHelperY(ax3)
ax3.set_ylim(ylim3rd)
ax3.set_yticks(yticks3rd)
ax3.set_ylabel(ylabel3rd)
if plotLegend:
legendHorizontalPos='center'
patterBCp='^p S(rc|nk)'
patterBCQ='^Q S(rc|nk)'
patterBCpQ='^[pQ] S(rc|nk)'
linesp=[line for line in lines if re.search(patterBCp,line) != None]
linesQ=[line for line in lines if re.search(patterBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patterBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,lines,scatters
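# --- Usage sketch --------------------------------------------------------------
# A minimal sketch (not executed at import time) of a single-section
# pltLDSpQAndEvents call on one axis; dfTCsLDSIn, QDct and pDct are placeholders
# supplied by the caller, xlim defaults to the full index range of dfTCsLDSIn.
def _sketchUsagePltLDSpQAndEvents(dfTCsLDSIn,QDct,pDct,xlim=None):
    fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
    ax=fig.add_subplot(111)
    axes,lines,scatters=pltLDSpQAndEvents(
         ax
        ,dfTCsLDSIn=dfTCsLDSIn
        ,QDct=QDct
        ,pDct=pDct
        ,xlim=xlim
    )
    return axes,lines,scatters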
def pltLDSErgVec(
ax=None # Axes auf die geplottet werden soll (und aus der neue axes ge-twinx-ed werden; plt.gcf().gca() wenn undef.
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=None # tuple (xmin,xmax); wenn undef. gelten min/max aus vorgenannten Daten als xlim; wenn Seg angegeben, gilt Seg
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD #[0,10,20,30,40]
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ylimR=ylimRD #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# ditto for acceleration (Beschl.)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9 # wenn ylim R/AC undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
"""
zeichnet Zeitkurven von App LDS Ergebnisvektoren auf ax
return: axes (Dct der Achsen), yLines (Dct der Linien)
Dct der Achsen: 'A': Alarm etc.; 'R': m3/h; 'a': ACC; 'TV': Timer und Leckvolumen
#! Lücken (nicht plotten) wenn keine Zeiten
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
yLines={}
try:
if dfSegReprVec.empty and dfDruckReprVec.empty:
logger.error("{0:s}{1:s}".format(logStr,'dfSegReprVec UND dfDruckReprVec leer?! Return.'))
return
if not dfSegReprVec.empty:
# keine komplett leeren Zeilen
dfSegReprVec=dfSegReprVec[~dfSegReprVec.isnull().all(1)]
# keine doppelten Indices
dfSegReprVec=dfSegReprVec[~dfSegReprVec.index.duplicated(keep='last')] # dfSegReprVec.groupby(dfSegReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if not dfDruckReprVec.empty:
# keine komplett leeren Zeilen
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.isnull().all(1)]
# keine doppelten Indices
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.index.duplicated(keep='last')] # dfDruckReprVec.groupby(dfDruckReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if ax==None:
ax=plt.gcf().gca()
axes['A']=ax
# x-Achse ----------------
if xlim == None:
if not dfSegReprVec.empty:
xlimMin=dfSegReprVec.index[0]
xlimMax=dfSegReprVec.index[-1]
elif not dfDruckReprVec.empty:
xlimMin=dfDruckReprVec.index[0]
xlimMax=dfDruckReprVec.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# 1. Achse Alarm -----------------------
if not dfSegReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha, color=Seg_Highlight_Color)
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha_Error, color=Seg_HighlightError_Color)
if not dfDruckReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha, color=Druck_Highlight_Color)
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha_Error, color=Druck_HighlightError_Color)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'AL_S',attrsDctLDS['Seg_AL_S_Attrs'])
yLines['AL_S Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'AL_S',attrsDctLDS['Druck_AL_S_Attrs'])
yLines['AL_S Drk']=lines[0]
if not dfSegReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'SB_S',attrsDctLDS['Seg_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Seg']=lines[0]
if not dfDruckReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'SB_S',attrsDctLDS['Druck_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Drk']=lines[0]
ax.set_ylim(ylimAL)
ax.set_yticks(yticksAL)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel('A [0/10/20] u. 10x B [0/1/2/3/4]')
# 2. y-<NAME> ----------------------------------------
ax2 = ax.twinx()
axes['R']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax2)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'MZ_AV',attrsDctLDS['Seg_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LR_AV',attrsDctLDS['Seg_LR_AV_Attrs'])
yLines['LR_AV (R2) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'NG_AV',attrsDctLDS['Seg_NG_AV_Attrs'])
yLines['NG_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'QM_AV',attrsDctLDS['Seg_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Seg']=lines[0]
if plotLPRate:
# R2 = R1 - LP
# R2 - R1 = -LP
# LP = R1 - R2
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LP_AV',attrsDctLDS['Seg_LP_AV_Attrs'])
yLines['LP_AV Seg']=lines[0]
if plotR2FillSeg:
df=dfSegReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
# R2 unter 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.2)
# zwischen R2 und 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
# R2 über 0 aber unter NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
# R2 über NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'MZ_AV',attrsDctLDS['Druck_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LR_AV',attrsDctLDS['Druck_LR_AV_Attrs'])
yLines['LR_AV (R2) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'NG_AV',attrsDctLDS['Druck_NG_AV_Attrs'])
yLines['NG_AV Drk']=lines[0]
if plotLPRate:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LP_AV',attrsDctLDS['Druck_LP_AV_Attrs'])
yLines['LP_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'QM_AV',attrsDctLDS['Druck_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Drk']=lines[0]
if plotR2FillDruck:
df=dfDruckReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
# R2 unter 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.4)
# zwischen R2 und 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
# R2 über 0 aber unter NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
# R2 über NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
ylimSeg,yticksSeg=pltLDSErgVecHelperYLimAndTicks(
dfSegReprVec
,'LR_AV'
,ylim=ylimR
,yticks=yticksR
,ylimxlim=ylimRxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimRSeg: {1:s} yticksRSeg: {2:s}".format(logStr,str(ylimSeg),str(yticksSeg)))
ylimDrk,yticksDrk=pltLDSErgVecHelperYLimAndTicks(
dfDruckReprVec
,'LR_AV'
,ylim=ylimR
,yticks=yticksR
,ylimxlim=ylimRxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimRDrk: {1:s} yticksRDrk: {2:s}".format(logStr,str(ylimDrk),str(yticksDrk)))
if ylimSeg[1]>=ylimDrk[1]:
ylimR=ylimSeg
yticksR=yticksSeg
else:
ylimR=ylimDrk
yticksR=yticksDrk
logger.debug("{0:s}ylimR: {1:s} yticksR: {2:s}".format(logStr,str(ylimR),str(yticksR)))
ax2.set_ylim(ylimR)
ax2.set_yticks(yticksR)
ax2.grid()
ax2.set_ylabel('R1, R2, NG, LP (R1-R2), QM 1.6% [Nm³/h]')
# 3. y-Achse Beschleunigung ----------------------------------------
if plotAC:
# 3. y-Achse Beschleunigung -------------------------------------------------
ax3 = ax.twinx()
axes['a']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax3)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax3,dfSegReprVec,'AC_AV',attrsDctLDS['Seg_AC_AV_Attrs'])
yLines['AC_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax3,dfSegReprVec,'ACF_AV',attrsDctLDS['Seg_ACF_AV_Attrs'])
yLines['ACF_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax3,dfDruckReprVec,'AC_AV',attrsDctLDS['Druck_AC_AV_Attrs'])
yLines['AC_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax3,dfDruckReprVec,'ACF_AV',attrsDctLDS['Druck_ACF_AV_Attrs'])
yLines['ACF_AV Drk']=lines[0]
# ACC Limits
if plotACCLimits:
if not dfSegReprVec.empty:
# +
line=ax3.axhline(y=dfSegReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=dfSegReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfDruckReprVec.empty:
# +
line=ax3.axhline(y=dfDruckReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=dfDruckReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfSegReprVec.empty:
# -
line=ax3.axhline(y=-dfSegReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=-dfSegReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfDruckReprVec.empty:
# -
line=ax3.axhline(y=-dfDruckReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=-dfDruckReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
ylimSeg,yticksSeg=pltLDSErgVecHelperYLimAndTicks(
dfSegReprVec
,'AC_AV'
,ylim=ylimAC
,yticks=yticksAC
,ylimxlim=ylimACxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimACSeg: {1:s} yticksACSeg: {2:s}".format(logStr,str(ylimSeg),str(yticksSeg)))
ylimDrk,yticksDrk=pltLDSErgVecHelperYLimAndTicks(
dfDruckReprVec
,'AC_AV'
,ylim=ylimAC
,yticks=yticksAC
,ylimxlim=ylimACxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimACDrk: {1:s} yticksACDrk: {2:s}".format(logStr,str(ylimDrk),str(yticksDrk)))
if ylimSeg[1]>=ylimDrk[1]:
ylimAC=ylimSeg
yticksAC=yticksSeg
else:
ylimAC=ylimDrk
yticksAC=yticksDrk
logger.debug("{0:s}ylimAC: {1:s} yticksAC: {2:s}".format(logStr,str(ylimAC),str(yticksAC)))
ax3.set_ylim(ylimAC)
ax3.set_yticks(yticksAC)
ax3.set_ylabel('a [mm/s²]')
# 4. y-Achse Timer und Volumen ----------------------------------------
if plotTV:
# 4. y-Achse Timer und Volumen ----------------------------------------
ax4 = ax.twinx()
axes['TV']=ax4
yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
if plotAC:
yPos=yPos+yTwinedAxesPosDeltaHP
pltHelperX(
ax4
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yPos
)
pltLDSHelperY(ax4)
if not dfSegReprVec.empty:
# TIMER_AV
lines = pltLDSErgVecHelper(ax4,dfSegReprVec,'TIMER_AV',attrsDctLDS['Seg_TIMER_AV_Attrs'],fct=plotTVTimerFct)
yLines['TIMER_AV Seg']=lines[0]
# AM_AV
lines = pltLDSErgVecHelper(ax4,dfSegReprVec,'AM_AV',attrsDctLDS['Seg_AM_AV_Attrs'],fct=plotTVAmFct)
yLines['AM_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
# TIMER_AV
lines = pltLDSErgVecHelper(ax4,dfDruckReprVec,'TIMER_AV',attrsDctLDS['Druck_TIMER_AV_Attrs'],fct=plotTVTimerFct)
yLines['TIMER_AV Drk']=lines[0]
# AM_AV
lines = pltLDSErgVecHelper(ax4,dfDruckReprVec,'AM_AV',attrsDctLDS['Druck_AM_AV_Attrs'],fct=plotTVAmFct)
yLines['AM_AV Drk']=lines[0]
if not dfSegReprVec.empty or not dfDruckReprVec.empty:
ax4.set_ylim(ylimTV)
ax4.set_yticks(yticksTV)
ax4.set_ylabel(plotTVAmLabel)
ax4.grid()
# 5. y-Achse DPDT ----------------------------------------
if plotDPDT and (not dfSegReprVec.empty or not dfDruckReprVec.empty):
# Min. ermitteln
DPDT_REF_MinSEG=0
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
s=dfSegReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
if not s.empty:
DPDT_REF_MinSEG=s.min()
DPDT_REF_MinDruck=0
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
s=dfDruckReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
if not s.empty:
DPDT_REF_MinDruck=s.min()
DPDT_REF_Min=min(DPDT_REF_MinSEG,DPDT_REF_MinDruck)
if DPDT_REF_Min >= 0:
pass # es gibt nichts zu plotten
else:
# Max ermitteln
maxSeg=DPDT_REF_Min
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
s=dfSegReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
s=s[s<0]
if not s.empty:
maxSeg=s.max()
maxDruck=DPDT_REF_Min
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
s=dfDruckReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
s=s[s<0]
if not s.empty:
maxDruck=s.max()
DPDT_REF_Max=max(maxSeg,maxDruck)
# 5. y-Achse DPDT ----------------------------------------
ax5 = ax.twinx()
axes['DPDT']=ax5
yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
if plotAC:
yPos=yPos+yTwinedAxesPosDeltaHP
if plotTV:
yPos=yPos+yTwinedAxesPosDeltaHP
pltHelperX(
ax5
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yPos
)
pltLDSHelperY(ax5)
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
lines = pltLDSErgVecHelper(ax5,dfSegReprVec,'DPDT_REF',attrsDctLDS['Seg_DPDT_REF_Attrs'],fct=None)
yLines['DPDT_REF Seg']=lines[0]
df=dfSegReprVec.loc[xlim[0]:xlim[1],:].query('DPDT_AV < 0')
#logger.debug("{0:s}df (neg. DPDT): {1:s}".format(logStr,df.to_string()))
lines = pltLDSErgVecHelper(ax5,df,'DPDT_AV',attrsDctLDS['Seg_DPDT_AV_Attrs'],fct=None)
yLines['DPDT_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
lines = pltLDSErgVecHelper(ax5,dfDruckReprVec,'DPDT_REF',attrsDctLDS['Druck_DPDT_REF_Attrs'],fct=None)
yLines['DPDT_REF Drk']=lines[0]
df=dfDruckReprVec.loc[xlim[0]:xlim[1],:].query('DPDT_AV < 0')
#logger.debug("{0:s}df (neg. DPDT): {1:s}".format(logStr,df.to_string()))
lines = pltLDSErgVecHelper(ax5,df,'DPDT_AV',attrsDctLDS['Druck_DPDT_AV_Attrs'],fct=None)
yLines['DPDT_AV Drk']=lines[0]
yTickList=[DPDT_REF_Min*10
,DPDT_REF_Min/0.9 # bei einem Vorhaltemass von 0.9 steht hier x; 0.9 X x kann man an den anderen beiden Ticks ablesen
#,DPDT_REF_Min
,0
,DPDT_REF_Min*-1
]
ax5.set_ylim(yTickList[0],yTickList[-1])
ax5.set_yticks([round(yTick,2) for yTick in yTickList])
if DPDT_REF_Max > DPDT_REF_Min:
ax5.set_ylabel("bar/Minute (max. Wert: {:6.3f})".format(DPDT_REF_Max))
else:
ax5.set_ylabel('bar/Minute')
ax5.grid()
if plotLegend:
legendHorizontalPos='center'
if not dfSegReprVec.empty:
if dfDruckReprVec.empty:
loc=legendLoc # Vorgabe
else:
loc='upper '+legendHorizontalPos # beide: fix
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([yLines[line] for line in yLines if re.search(patternSeg,line) != None])
,tuple([line for line in yLines if re.search(patternSeg,line) != None])
,loc=loc #'upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
if dfSegReprVec.empty:
loc=legendLoc # Vorgabe
else:
loc='lower '+legendHorizontalPos # beide: fix
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([yLines[line] for line in yLines if re.search(patternDruck,line) != None])
,tuple([line for line in yLines if re.search(patternDruck,line) != None])
,loc=loc #'lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,yLines
def pltHelperX(
ax
,dateFormat='%d.%m.%y: %H:%M:%S'
,bysecond=None # [0,15,30,45]
,byminute=None
,byhour=None
,yPos=-0.0125 #: (i.d.R. negativer) Abstand der y-Achse von der Zeichenfläche; default: -0.0125
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
if bysecond != None:
majLocatorTmp=mdates.SecondLocator(bysecond=bysecond)
elif byminute != None:
majLocatorTmp=mdates.MinuteLocator(byminute=byminute)
elif byhour != None:
majLocatorTmp=mdates.HourLocator(byhour=byhour)
else:
majLocatorTmp=mdates.HourLocator(byhour=[0,12])
majFormatterTmp=mdates.DateFormatter(dateFormat)
logger.debug("{0:s}ax.xaxis.set_major_locator ...".format(logStr))
ax.xaxis.set_major_locator(majLocatorTmp)
logger.debug("{0:s}ax.xaxis.set_major_formatter ...".format(logStr))
ax.xaxis.set_major_formatter(majFormatterTmp)
#logger.debug("{0:s}ax.get_xticks(): {1:s}".format(logStr,str(ax.get_xticks())))
logger.debug("{0:s}setp(ax.xaxis.get_majorticklabels() ...".format(logStr))
dummy=plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical',ha='center')
ax.spines["left"].set_position(("axes",yPos))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltLDSHelperY(
ax
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
pltMakePatchSpinesInvisible(ax)
ax.spines['left'].set_visible(True)
ax.yaxis.set_label_position('left')
ax.yaxis.set_ticks_position('left')
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltLDSErgVecHelperYLimAndTicks(
dfReprVec
,dfReprVecCol
,ylim=None #(-10,10) # wenn undef., dann min/max dfReprVec
,yticks=None #[-10,0,10] # wenn undef., dann aus dem Ergebnis von ylim
,ylimxlim=False #wenn Wahr und ylim undef., dann wird nachf. xlim beruecksichtigt bei min/max dfReprVec
,xlim=None
,ySpanMin=0.1 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
):
"""
Returns: ylim,yticks
Der y-Werte-Bereich ylim wird zur x-Achse symmetrisch ermittelt.
yticks spielt dabei keine Rolle.
Sind ylim bzw. yticks definiert, erfahren sie keine Änderung.
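Beispiel (Skizze mit frei gewählten Beispieldaten; die erwarteten Werte folgen aus der oben beschriebenen Rundungs- und Symmetrielogik):
>>> import pandas as pd
>>> import Rm
>>> ylim,yticks=Rm.pltLDSErgVecHelperYLimAndTicks(pd.DataFrame({'LR_AV':[-0.3,0.2,0.7]}),'LR_AV')
>>> tuple(float(v) for v in ylim)
(-1.0, 1.0)
>>> [float(v) for v in yticks]
[-1.0, 0.0, 1.0]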
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
if ylim != None:
# der y-Wertebereich ist explizit definiert
pass
else:
if not dfReprVec.empty and not dfReprVec.loc[:,dfReprVecCol].isnull().all().all():
if not ylimxlim:
ylimmin=dfReprVec.loc[:,dfReprVecCol].min()
ylimmax=dfReprVec.loc[:,dfReprVecCol].max()
else:
(xlimMin,xlimMax)=xlim
if not dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].isnull().all().all():
ylimmin=dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].min()
ylimmax=dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].max()
else:
ylimmin=0
ylimmax=0
ylimminR=round(ylimmin,0)
ylimmaxR=round(ylimmax,0)
if ylimminR > ylimmin:
ylimminR=ylimminR-1
if ylimmaxR < ylimmax:
ylimmaxR=ylimmaxR+1
ylimminAbsR=math.fabs(ylimminR)
# B auf den extremaleren Wert
ylimB=max(ylimminAbsR,ylimmaxR)
if ylimB < ySpanMin:
# B auf Mindestwert
ylimB=ySpanMin
## Differenz < Mindestwert: B+
#if math.fabs(ylimmax-ylimmin) < ySpanMin:
# ylimB=.5*(ylimminAbs+ylimmax)+ySpanMin
ylim=(-ylimB,ylimB)
else:
ylim=(-ySpanMin,ySpanMin)
if yticks != None:
# die y-Ticks sind explizit definiert
pass
else:
# aus Wertebereich
(ylimMin,ylimMax)=ylim
yticks=[ylimMin,0,ylimMax]
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return ylim,yticks
def pltLDSpQHelperYLimAndTicks(
dfReprVec
,dfReprVecCols
,ylim=None # wenn undef., dann min/max dfReprVec
,yticks=None # wenn undef., dann aus ylimR
,ylimxlim=False # wenn Wahr und ylim undef., dann wird nachf. xlim beruecksichtigt bei min/max dfReprVec
,xlim=None # x-Wertebereich
,ySpanMin=0.1 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,yGridSteps=yGridStepsD
):
"""
Returns: ylim,yticks
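Beispiel (Skizze mit frei gewählten Beispieldaten; der Wertebereich wird auf ganze Zahlen erweitert, yGridSteps unterteilt ihn gleichmäßig):
>>> import pandas as pd
>>> import Rm
>>> df=pd.DataFrame({'p':[2.3,3.1],'Q':[0.4,1.2]})
>>> ylim,yticks=Rm.pltLDSpQHelperYLimAndTicks(df,['p','Q'],yGridSteps=2)
>>> tuple(float(v) for v in ylim)
(0.0, 4.0)
>>> [float(v) for v in yticks]
[0.0, 2.0, 4.0]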
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if ylim != None:
# der y-Wertebereich ist explizit definiert
pass
else:
df=dfReprVec.loc[:,[col for col in dfReprVecCols]]
if not ylimxlim:
# Extremalwerte Analysebereich
ylimmin=df.min().min()
ylimmax=df.max().max()
else:
if xlim == None:
logger.error("{0:s} xlim muss angegeben sein wenn ylimxlim Wahr gesetzt wird. Weiter mit ylimxlim Falsch.".format(logStr))
ylimmin=df.min().min()
ylimmax=df.max().max()
else:
# Extremalwerte x-Wertebereich
(xlimMin,xlimMax)=xlim
# Extremalwerte Analysebereich
ylimmin=df.loc[xlimMin:xlimMax,:].min().min()
ylimmax=df.loc[xlimMin:xlimMax,:].max().max()
logger.debug("{0:s} ylimmin={1:10.2f} ylimmax={2:10.2f}.".format(logStr,ylimmin,ylimmax))
if math.fabs(ylimmax-ylimmin) < ySpanMin:
ylimmax=ylimmin+ySpanMin
logger.debug("{0:s} ylimmin={1:10.2f} ylimmax={2:10.2f}.".format(logStr,ylimmin,ylimmax))
ylimMinR=round(ylimmin,0)
ylimMaxR=round(ylimmax,0)
if ylimMinR>ylimmin:
ylimMinR=ylimMinR-1
if ylimMaxR<ylimmax:
ylimMaxR=ylimMaxR+1
logger.debug("{0:s} ylimMinR={1:10.2f} ylimMaxR={2:10.2f}.".format(logStr,ylimMinR,ylimMaxR))
ylim=(ylimMinR,ylimMaxR)
if yticks != None:
# die y-Ticks sind explizit definiert
pass
else:
# aus Wertebereich
(ylimMin,ylimMax)=ylim
if yGridSteps==0:
yticks=[ylimMin,ylimMax]
else:
dYGrid=(ylimMax-ylimMin)/yGridSteps
y=np.arange(ylimMin,ylimMax,dYGrid)
if y[-1]<ylimMax:
y=np.append(y,y[-1]+dYGrid)
yticks=y
logger.debug("{0:s} yticks={1:s}.".format(logStr,str(yticks)))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return ylim,yticks
def pltLDSErgVecHelper(
ax
,dfReprVec=pd.DataFrame()
,ID='AL_S' # Spaltenname in dfReprVec
,attrs={}
,fct=None # Function
):
"""
Helper
Returns:
lines: ax.plot-Ergebnis
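Beispiel (Skizze mit frei gewählten Beispieldaten; der Schlüssel 'where' in attrs schaltet von plot auf step um, alle übrigen Attribute werden per setp auf die Linie angewandt):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> fig,ax=plt.subplots()
>>> df=pd.DataFrame({'AL_S':[0,1,1]},index=pd.date_range('2021-01-01',periods=3,freq='min'))
>>> lines=Rm.pltLDSErgVecHelper(ax,df,'AL_S',{'color':'red','where':'post'})
>>> lines[0].get_label()
'AL_S'
>>> plt.close()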
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lines=[]
label=ID
x=dfReprVec.index.values
if ID in dfReprVec.columns.to_list():
if fct==None:
y=dfReprVec[ID].values
else:
y=dfReprVec[ID].apply(fct).values
if 'where' in attrs.keys():
logger.debug("{0:s}ID: {1:s}: step-Plot".format(logStr,ID))
lines = ax.step(x,y,label=label
,where=attrs['where'])
else:
lines = ax.plot(x,y,label=label
)
for prop,propValue in [(prop,value) for (prop, value) in attrs.items() if prop not in ['where']]:
plt.setp(lines[0],"{:s}".format(prop),propValue)
else:
logger.warning("{0:s}Spalte: {1:s}: nicht vorhanden?!".format(logStr,ID))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return lines
def pltLDSpQHelper(
ax
,TCdf=pd.DataFrame()
,ID='' # Spaltenname
,xDctValue={} # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs={} # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey='IDPlt' # Schluesselbezeichner in xDctValue (Key in xDctAttrs und xDctFcts)
,IDPltValuePostfix=None # SchluesselPostfix in xDctAttrs und xDctFcts - i.e. ' RTTM'
,xDctFcts={} # a Dct with Fcts - i.e. {'p Src': lambda x: 134.969 + x*10^5/(794.*9.81)}
,timeShift=pd.Timedelta('0 seconds')
):
"""
Helper
Returns:
label: Bezeichner
lines: ax.plot-Ergebnis
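Beispiel (Skizze mit frei gewählten Beispieldaten; 'IDPlt' in xDctValue liefert den Layout-Schlüssel, unter dem in xDctAttrs bzw. xDctFcts nachgeschlagen wird):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> fig,ax=plt.subplots()
>>> TCdf=pd.DataFrame({'Q_TC':[1.,2.,3.]},index=pd.date_range('2021-01-01',periods=3,freq='min'))
>>> label,lines=Rm.pltLDSpQHelper(ax,TCdf=TCdf,ID='Q_TC',xDctValue={'IDPlt':'Q Src'},xDctAttrs={'Q Src':{'color':'red'}})
>>> label
'Q Src Q_TC'
>>> plt.close()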
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
logger.debug("{:s}Echo der Parameter: xDctFcts: {!s:s}".format(logStr,xDctFcts))
label=''
lines=[]
# nur Not Null plotten
s=TCdf[ID][TCdf[ID].notnull()]
logger.debug("{0:s}timeShift: {1:s}".format(logStr,str(timeShift)))
x=s.index.values+timeShift #TCdf.index.values+timeShift
IDPltValue=None
if IDPltKey in xDctValue.keys():
# es liegt ein Schluessel fuer eine Layout-Informationen vor
IDPltValue=xDctValue[IDPltKey] # koennte auch None sein ... {'IDPlt':None}
if IDPltValue != None and IDPltValuePostfix != None:
IDPltValue=IDPltValue+IDPltValuePostfix
if IDPltValue in xDctFcts.keys():
logger.debug("{:s}Fcts fuer: {:s}".format(logStr,IDPltValue))
fct=xDctFcts[IDPltValue]
y=s.apply(fct).values#TCdf[ID].apply(fct).values
else:
y=s.values #TCdf[ID].values
if IDPltValue != None:
label=IDPltValue+' '+ID
if IDPltValue in xDctAttrs.keys():
if 'where' in xDctAttrs[IDPltValue].keys():
logger.debug("{0:s}ID: {1:s}: step-Plot".format(logStr,ID))
lines = ax.step(x,y
,label=label
,where=xDctAttrs[IDPltValue]['where'])
else:
lines = ax.plot(x,y
,label=label
)
for prop,propValue in [(prop,value) for (prop, value) in xDctAttrs[IDPltValue].items() if prop not in ['where']]:
plt.setp(lines[0],"{:s}".format(prop),propValue)
else:
# es ist kein Layout definiert - einfach plotten
logger.debug("{0:s}IDPltValue: {1:s}: es ist kein Layout definiert - einfach plotten ...".format(logStr,IDPltValue))
lines = ax.plot(x,y
,label=label
)
else:
# es liegt kein Schluessel (oder Wert None) fuer eine Layout-Informationen vor - einfach plotten
label=ID
logger.debug("{0:s}ID: {1:s}: es liegt kein Schluessel (oder kein Wert) fuer eine Layout-Informationen vor - einfach plotten ...".format(logStr,ID))
lines = ax.plot(x,y)
logger.debug("{0:s}label: '{1:s}' len(lines): {2:d}".format(logStr,label,len(lines)))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return label, lines
def pltLDSSIDHelper(
ax
,dfTCsSIDEvents
,dfTCsScenTimeShift
,dfTCsSIDEventsyOffset # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden); max. Erhöhung: 0.9
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
):
"""
Helper
Returns:
labels: Bezeichner
scatters: ax.scatter-Ergebnisse
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
markerDefSchieber=[ # Schiebersymbole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust (gefülltes "dickes" Kreuz)
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
labels=[]
scatters=[]
# Anzahl der verschiedenen Schieber ermitteln
idxKat={}
idxSchieberLfd=0
for col in dfTCsSIDEvents.columns:
m=re.search(pSIDEvents,col)
valRegExSchieberID=m.group('colRegExSchieberID')
if valRegExSchieberID not in idxKat.keys():
idxKat[valRegExSchieberID]=idxSchieberLfd
idxSchieberLfd=idxSchieberLfd+1
logger.debug("{0:s}Dct idxKat: keys (versch. Schieber - meint versch. Kategorien): {1:s}: values (Index der jeweiligen Kategorie): {2:s}".format(logStr,str(idxKat.keys()),str(idxKat.values())))
dfTCsSIDEventsPlot = dfTCsSIDEvents # hier keine Veränderungen mehr
for col in dfTCsSIDEventsPlot.columns:
m=re.search(pSIDEvents,col)
valRegExSchieberID=m.group('colRegExSchieberID')
idxSchieberLfd=idxKat[valRegExSchieberID]
valRegExEventID=m.group('colRegExEventID')
valRegExMiddle=m.group('colRegExMiddle')
# Markersize
s=plt.rcParams['lines.markersize']**2
# Marker
if valRegExMiddle == valRegExMiddleCmds:
idxMarker=eventCCmds[valRegExEventID]
else:
idxMarker=eventCStats[valRegExEventID]
if valRegExEventID in ['In.ZUST']:
s=s*2.5
if idxMarker < len(markerDef):
m=markerDef[idxMarker]
else:
m=markerDef[-1]
logger.debug("{0:s}{1:s}: idxMarker: Soll: {2:d} MarkerIdx gewählt: {3:d}".format(logStr,col,idxMarker,len(markerDef)-1))
if idxSchieberLfd < len(baseColorsDef):
c=baseColorsDef[idxSchieberLfd]
else:
c=baseColorsDef[-1]
logger.debug("{0:s}{1:s}: idxSchieberLfd: Ist: {2:d} FarbenIdx gewählt: {3:d}".format(logStr,col,idxSchieberLfd,len(baseColorsDef)-1))
colors=[c for idx in range(len(dfTCsSIDEventsPlot.index))] # alle Ereignisse (der Spalte) haben dieselbe Farbe
label=col # alle Ereignisse (der Spalte) haben dasselbe Label
#sDefault=plt.rcParams['lines.markersize']**2
x=dfTCsSIDEventsPlot.index.values+dfTCsScenTimeShift
y=dfTCsSIDEventsPlot[col].values+min(idxSchieberLfd*dfTCsSIDEventsyOffset,.9)
logger.debug("{:s}{:s}: erste und letzte Werte: x:{!s:s} y:{!s:s}...".format(logStr,col,x[::len(x)-1],y[::len(y)-1]))
scatter = ax.scatter(
x
,y
,c=colors
,marker=m
,label=label
,s=s#Default
)
# scatter ist eine PathCollection; Attribut u.a. get_label(): Return the label used for this artist in the legend
# auch wenn y durchgehend Null wird ein scatter zurueckgegeben (d.h. ist ein Legendeneintrag vorhanden)
labels.append(label)
scatters.append(scatter)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return labels, scatters
# --- PLOT: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def pltMakeCategoricalCmap(baseColorsDef="tab10",catagoryColors=None,nOfSubCatsReq=3,reversedSubCatOrder=False):
"""
Returns a cmap with nOfCatsReq * nOfSubCatsReq discrete colors.
Parameter:
baseColorsDef: a (discrete) cmap defining the "base"colors
default: tab10
if baseColorsDef cannot be resolved via get_cmap to a matplotlib.colors.ListedColormap, baseColorsDef is interpreted via to_rgb as a list of colors
in this case catagoryColors is ignored
catagoryColors: a list of "base"colors indices for this cmap
the length of the list is the number of Categories requested: nOfCatsReq
the cmap's nOfColors must be >= nOfCatsReq
default: None (==> nOfCatsReq = cmap's nOfColors)
i.e. [2,8,3] for tab10 is green, yellow (ocher), red
nOfSubCatsReq: number of Subcategories requested
reversedSubCatOrder: False (default): if True, the last color of a category is from baseColorsDef
reversedSubCatOrder can be a list
Returns:
cmap with nOfCatsReq * nOfSubCatsReq discrete colors; None if an error occurs
one "base"color per category
nOfSubCatsReq "sub"colors per category
so each category consists of nOfSubCatsReq colors
Raises:
RmError
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import Rm
>>> Rm.pltMakeCategoricalCmap().N
30
>>> Rm.pltMakeCategoricalCmap(catagoryColors=[2,8,3]).N # 2 8 3 in tab10: grün gelb rot
9
>>> baseColorsDef="tab10"
>>> catagoryColors=[2,8,3]
>>> nOfSubCatsReq=4
>>> # grün gelb rot mit je 4 Farben von hell nach dunkel
>>> cm=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> cm.colors
array([[0.75 , 1. , 0.75 ],
[0.51819172, 0.87581699, 0.51819172],
[0.32570806, 0.75163399, 0.32570806],
[0.17254902, 0.62745098, 0.17254902],
[0.9983871 , 1. , 0.75 ],
[0.91113148, 0.91372549, 0.51165404],
[0.82408742, 0.82745098, 0.30609849],
[0.7372549 , 0.74117647, 0.13333333],
[1. , 0.75 , 0.75142857],
[0.94640523, 0.53069452, 0.53307001],
[0.89281046, 0.33167491, 0.3348814 ],
[0.83921569, 0.15294118, 0.15686275]])
>>> cm2=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=[False]+2*[True])
>>> cm.colors[nOfSubCatsReq-1]==cm2.colors[0]
array([ True, True, True])
>>> plt.close()
>>> size_DINA6quer=(5.8,4.1)
>>> fig, ax = plt.subplots(figsize=size_DINA6quer)
>>> fig.subplots_adjust(bottom=0.5)
>>> norm=matplotlib.colors.Normalize(vmin=0, vmax=100)
>>> cb=matplotlib.colorbar.ColorbarBase(ax, cmap=cm2,norm=norm,orientation='horizontal')
>>> cb.set_label('baseColorsDef was (via get_cmap) a matplotlib.colors.ListedColormap')
>>> #plt.show()
>>> cm3=Rm.pltMakeCategoricalCmap(baseColorsDef=['b','c','m'],nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> cm3.colors
array([[0.75 , 0.75 , 1. ],
[0.5 , 0.5 , 1. ],
[0.25 , 0.25 , 1. ],
[0. , 0. , 1. ],
[0.75 , 1. , 1. ],
[0.45833333, 0.91666667, 0.91666667],
[0.20833333, 0.83333333, 0.83333333],
[0. , 0.75 , 0.75 ],
[1. , 0.75 , 1. ],
[0.91666667, 0.45833333, 0.91666667],
[0.83333333, 0.20833333, 0.83333333],
[0.75 , 0. , 0.75 ]])
>>> plt.close()
>>> fig, ax = plt.subplots(figsize=size_DINA6quer)
>>> fig.subplots_adjust(bottom=0.5)
>>> norm=matplotlib.colors.Normalize(vmin=0, vmax=100)
>>> cb=matplotlib.colorbar.ColorbarBase(ax, cmap=cm3,norm=norm,orientation='horizontal')
>>> cb.set_label('baseColorsDef was (via to_rgb) a list of colors')
>>> #plt.show()
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
cmap=None
try:
try:
# Farben, "base"colors, welche die cmap hat
nOfColors=plt.get_cmap(baseColorsDef).N
if catagoryColors==None:
catagoryColors=np.arange(nOfColors,dtype=int)
# verlangte Kategorien
nOfCatsReq=len(catagoryColors)
if nOfCatsReq > nOfColors:
logStrFinal="{0:s}: nOfCatsReq: {1:d} > cmap's nOfColors: {2:d}!".format(logStr,nOfCatsReq,nOfColors)
raise RmError(logStrFinal)
if max(catagoryColors) > nOfColors-1:
logStrFinal="{0:s}: max. Idx of catsReq: {1:d} > cmap's nOfColors-1: {2:d}!".format(logStr,max(catagoryColors),nOfColors-1)
raise RmError(logStrFinal)
# alle Farben holen, welche die cmap hat
ccolors = plt.get_cmap(baseColorsDef)(np.arange(nOfColors,dtype=int))
# die gewuenschten Kategorie"Basis"farben extrahieren
ccolors=[ccolors[idx] for idx in catagoryColors]
except:
listOfColors=baseColorsDef
nOfColors=len(listOfColors)
nOfCatsReq=nOfColors
ccolors=[]
for color in listOfColors:
ccolors.append(list(matplotlib.colors.to_rgb(color)))
finally:
pass
logger.debug("{0:s}ccolors: {1:s}".format(logStr,str(ccolors)))
logger.debug("{0:s}nOfCatsReq: {1:s}".format(logStr,str((nOfCatsReq))))
logger.debug("{0:s}nOfSubCatsReq: {1:s}".format(logStr,str((nOfSubCatsReq))))
# Farben bauen -------------------------------------
# resultierende Farben vorbelegen
cols = np.zeros((nOfCatsReq*nOfSubCatsReq, 3))
# ueber alle Kategoriefarben
if type(reversedSubCatOrder) is not list:
reversedSubCatOrderLst=nOfCatsReq*[reversedSubCatOrder]
else:
reversedSubCatOrderLst=reversedSubCatOrder
logger.debug("{0:s}reversedSubCatOrderLst: {1:s}".format(logStr,str((reversedSubCatOrderLst))))
for i, c in enumerate(ccolors):
rgb=pltMakeCategoricalColors(c,nOfSubColorsReq=nOfSubCatsReq,reversedOrder=reversedSubCatOrderLst[i])
cols[i*nOfSubCatsReq:(i+1)*nOfSubCatsReq,:] = rgb
cmap = matplotlib.colors.ListedColormap(cols)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return cmap
def pltMakePatchSpinesInvisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
def pltHlpAlignMarker(marker,halign='center',valign='middle'):
"""
create markers with specified alignment.
Parameters
----------
marker : a valid marker specification.
See mpl.markers
halign : string, float {'left', 'center', 'right'}
Specifies the horizontal alignment of the marker. *float* values
specify the alignment in units of the markersize/2 (0 is 'center',
-1 is 'right', 1 is 'left').
valign : string, float {'top', 'middle', 'bottom'}
Specifies the vertical alignment of the marker. *float* values
specify the alignment in units of the markersize/2 (0 is 'middle',
-1 is 'top', 1 is 'bottom').
Returns
-------
marker_array : numpy.ndarray
A Nx2 array that specifies the marker path relative to the
plot target point at (0, 0).
Notes
-----
The marker_array can be passed directly to ax.plot and ax.scatter, e.g.::
ax.plot(1, 1, marker=pltHlpAlignMarker('>', 'left'))
"""
if isinstance(halign,str):
halign = {'right': -1.,
'middle': 0.,
'center': 0.,
'left': 1.,
}[halign]
if isinstance(valign,str):
valign = {'top': -1.,
'middle': 0.,
'center': 0.,
'bottom': 1.,
}[valign]
# Define the base marker
bm = markers.MarkerStyle(marker)
# Get the marker path and apply the marker transform to get the
# actual marker vertices (they should all be in a unit-square
# centered at (0, 0))
m_arr = bm.get_path().transformed(bm.get_transform()).vertices
# Shift the marker vertices for the specified alignment.
m_arr[:, 0] += halign / 2
m_arr[:, 1] += valign / 2
return Path(m_arr, bm.get_path().codes)
def pltNetFigAx(pDf,**kwds):
"""
Erzeugt eine für die Netzdarstellung verzerrungsfreie Axes-Instanz.
* verwendet gcf() (will return an existing figure if one is open, or it will make a new one if there is no active figure)
* an already existing figure might be created this way: fig=plt.figure(dpi=2*72,linewidth=1.)
* errechnet die verzerrungsfreie Darstellung unter Berücksichtigung einer zukünftigen horizontalen Farblegende
* erzeugt eine Axes-Instanz
* setzt Attribute der Axes-Instanz
* setzt Attribute der Figure-Instanz
Args:
pDf: dataFrame
Coordinates:
* pXCor_i: colName in pDf (default: 'pXCor_i'): x-Start Coordinate of all Edges to be plotted
* pYCor_i: colName in pDf (default: 'pYCor_i'): y-Start Coordinate of all Edges to be plotted
* pXCor_k: colName in pDf (default: 'pXCor_k'): x-End Coordinate of all Edges to be plotted
* pYCor_k: colName in pDf (default: 'pYCor_k'): y-End Coordinate of all Edges to be plotted
Colorlegend:
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
Figure:
* pltTitle: title [not suptitle] (default: 'pltNetFigAx')
* figFrameon: figure frame (background): displayed or invisible (default: True)
* figEdgecolor: edge color of the Figure rectangle (default: 'black')
* figFacecolor: face color of the Figure rectangle (default: 'white')
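Beispiel (Skizze; die Spaltennamen entsprechen den Defaults, die Weltkoordinaten sind frei gewählt):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> plt.close('all')
>>> pDf=pd.DataFrame({'pXCor_i':[0.,50.],'pYCor_i':[0.,0.],'pXCor_k':[50.,100.],'pYCor_k':[0.,75.]})
>>> Rm.pltNetFigAx(pDf,pltTitle='Beispielnetz')
>>> tuple(float(v) for v in plt.gca().get_xlim())
(0.0, 100.0)
>>> plt.close()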
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Coordinates
if 'pXCor_i' not in keys:
kwds['pXCor_i']='pXCor_i'
if 'pYCor_i' not in keys:
kwds['pYCor_i']='pYCor_i'
if 'pXCor_k' not in keys:
kwds['pXCor_k']='pXCor_k'
if 'pYCor_k' not in keys:
kwds['pYCor_k']='pYCor_k'
# Colorlegend
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
# Figure
if 'pltTitle' not in keys:
kwds['pltTitle']='pltNetFigAx'
if 'figFrameon' not in keys:
kwds['figFrameon']=True
if 'figEdgecolor' not in keys:
kwds['figEdgecolor']='black'
if 'figFacecolor' not in keys:
kwds['figFacecolor']='white'
except:
pass
try:
dx=max(pDf[kwds['pXCor_i']].max(),pDf[kwds['pXCor_k']].max())
dy=max(pDf[kwds['pYCor_i']].max(),pDf[kwds['pYCor_k']].max())
# erf. Verhältnis bei verzerrungsfreier Darstellung
dydx=dy/dx
if(dydx>=1):
dxInch=DINA4_x # Hochformat
else:
dxInch=DINA4_y # Querformat
figwidth=dxInch
#verzerrungsfrei: Blattkoordinatenverhaeltnis = Weltkoordinatenverhaeltnis
factor=1-(kwds['CBFraction']+kwds['CBHpad'])
# verzerrungsfreie Darstellung sicherstellen
figheight=figwidth*dydx*factor
# Weltkoordinatenbereich
xlimLeft=0
ylimBottom=0
xlimRight=dx
ylimTop=dy
# plt.figure(dpi=, facecolor=, edgecolor=, linewidth=, frameon=True)
fig = plt.gcf() # This will return an existing figure if one is open, or it will make a new one if there is no active figure.
fig.set_figwidth(figwidth)
fig.set_figheight(figheight)
logger.debug("{:s}dx={:10.2f} dy={:10.2f}".format(logStr,dx,dy))
logger.debug("{:s}figwidth={:10.2f} figheight={:10.2f}".format(logStr,figwidth,figheight))
ax=plt.subplot()
ax.set_xlim(left=xlimLeft)
ax.set_ylim(bottom=ylimBottom)
ax.set_xlim(right=xlimRight)
ax.set_ylim(top=ylimTop)
xTicks=ax.get_xticks()
dxTick = xTicks[1]-xTicks[0]
yTicks=ax.set_yticks([idx*dxTick for idx in range(math.floor(dy/dxTick)+1)])
plt.title(kwds['pltTitle'])
fig.set_frameon(kwds['figFrameon'])
fig.set_edgecolor(kwds['figEdgecolor'])
fig.set_facecolor(kwds['figFacecolor'])
# https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
# Size in pts:
# the argument markersize in plot denotes the markersize (i.e. diameter) in points
# the argument s in scatter denotes the markersize**2 in points^2
# so a given plot-marker with markersize=x needs a scatter-marker with s=x**2 if the scatter-marker shall cover the same "area" in points^2
# the "area" of the scatter-marker is proportional to the s param
# What are points - pts:
# the standard size of points in matplotlib is 72 ppi
# 1 point is hence 1/72 inches (1 inch = 1 Zoll = 2.54 cm)
# 1 point = 0.352777.... mm
# points and pixels - px:
# 1 point = dpi/ppi
# the standard dpi in matplotlib is 100
# a scatter-marker whose "area" always covers 10 pixels:
# s=(10*ppi/dpi)**2
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetNodes(pDf,**kwds):
"""
Scatters NODEs on gca().
Args:
pDf: dataFrame
NODE: Size (Attribute)
* pAttribute: colName (default: 'Attribute') in pDf
* pSizeFactor: (default: 1.)
* scatter Sy-Area in pts^2 = pSizeFactor * Attribute
NODE: Color (Measure)
* pMeasure: colName (default: 'Measure') in pDf
* pMeasureColorMap (default: plt.cm.autumn)
* pMeasureAlpha (default: 0.9)
* pMeasureClip (default: False)
* CBFixedLimits (default: True)
* CBFixedLimitLow (default: 0.)
* CBFixedLimitHigh (default: 1.)
NODE: 3Classes
* pMeasure3Classes (default: True)
* pMCategory: colName (default: 'MCategory') in pDf
* pMCatTopTxt (default: 'Top')
* pMCatMidTxt (default: 'Middle')
* pMCatBotTxt (default: 'Bottom')
* pMCatTopColor (default: 'palegreen')
* pMCatTopAlpha (default: 0.9)
* pMCatTopClip (default: False)
* pMCatMidColorMap (default: plt.cm.autumn)
* pMCatMidAlpha (default: 0.9)
* pMCatMidClip (default: False)
* pMCatBotColor (default: 'violet')
* pMCatBotAlpha (default: 0.9)
* pMCatBotClip (default: False)
NODE:
* pXCor: colName (default: 'pXCor_i') in pDf
* pYCor: colName (default: 'pYCor_i') in pDf
Returns:
(pcN, vmin, vmax)
* pcN: die mit Farbskala gezeichneten Symbole
* vmin/vmax: die für die Farbskala verwendeten Extremalwerte
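Beispiel (Skizze mit frei gewählten Beispieldaten; mit pMeasure3Classes=False werden alle Knoten über die Farbskala eingefärbt, vmin/vmax stammen hier aus den CBFixedLimits-Defaults):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> plt.close('all')
>>> pDf=pd.DataFrame({'Attribute':[10.,20.,30.],'Measure':[.2,.5,.9],'pXCor_i':[0.,1.,2.],'pYCor_i':[0.,1.,2.]})
>>> pcN,vmin,vmax=Rm.pltNetNodes(pDf,pMeasure3Classes=False)
>>> (vmin,vmax)
(0.0, 1.0)
>>> plt.close()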
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# NODE: Size (Attribute)
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pSizeFactor' not in keys:
kwds['pSizeFactor']=1.
# NODE: Color (Measure)
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureColorMap' not in keys:
kwds['pMeasureColorMap']=plt.cm.autumn
if 'pMeasureAlpha' not in keys:
kwds['pMeasureAlpha']=0.9
if 'pMeasureClip' not in keys:
kwds['pMeasureClip']=False
if 'CBFixedLimits' not in keys:
kwds['CBFixedLimits']=True
if 'CBFixedLimitLow' not in keys:
kwds['CBFixedLimitLow']=0.
if 'CBFixedLimitHigh' not in keys:
kwds['CBFixedLimitHigh']=1.
# NODE: 3Classes
if 'pMeasure3Classes' not in keys:
kwds['pMeasure3Classes']=True
if 'pMCategory' not in keys:
kwds['pMCategory']='MCategory'
if 'pMCatTopTxt' not in keys:
kwds['pMCatTopTxt']='Top'
if 'pMCatMidTxt' not in keys:
kwds['pMCatMidTxt']='Middle'
if 'pMCatBotTxt' not in keys:
kwds['pMCatBotTxt']='Bottom'
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatTopAlpha' not in keys:
kwds['pMCatTopAlpha']=0.9
if 'pMCatTopClip' not in keys:
kwds['pMCatTopClip']=False
if 'pMCatMidColorMap' not in keys:
kwds['pMCatMidColorMap']=plt.cm.autumn
if 'pMCatMidAlpha' not in keys:
kwds['pMCatMidAlpha']=0.9
if 'pMCatMidClip' not in keys:
kwds['pMCatMidClip']=False
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
if 'pMCatBotAlpha' not in keys:
kwds['pMCatBotAlpha']=0.9
if 'pMCatBotClip' not in keys:
kwds['pMCatBotClip']=False
# NODE:
if 'pXCor' not in keys:
kwds['pXCor']='pXCor_i'
if 'pYCor' not in keys:
kwds['pYCor']='pYCor_i'
except:
pass
try:
ax=plt.gca()
if kwds['pMeasure3Classes']:
pN_top=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatTopTxt'])]
pN_mid=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatMidTxt'])]
pN_bot=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatBotTxt'])]
pN_top_Anz,col=pN_top.shape
pN_mid_Anz,col=pN_mid.shape
pN_bot_Anz,col=pN_bot.shape
pcN_top=ax.scatter(
pN_top[kwds['pXCor']],pN_top[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_top[kwds['pAttribute']]
,color=kwds['pMCatTopColor']
,alpha=kwds['pMCatTopAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatTopClip'])
logger.debug("{:s}Anzahl mit fester Farbe Top gezeichneter Symbole={:d}".format(logStr,pN_top_Anz))
if not kwds['CBFixedLimits']:
vmin=pN_mid[kwds['pMeasure']].min()
vmax=pN_mid[kwds['pMeasure']].max()
else:
vmin=kwds['CBFixedLimitLow']
vmax=kwds['CBFixedLimitHigh']
pcN=ax.scatter(
pN_mid[kwds['pXCor']],pN_mid[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_mid[kwds['pAttribute']]
# Farbskala
,cmap=kwds['pMCatMidColorMap']
# Normierung Farbe
,vmin=vmin
,vmax=vmax
# Farbwert
,c=pN_mid[kwds['pMeasure']]
,alpha=kwds['pMCatMidAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatMidClip']
)
logger.debug("{:s}Anzahl mit Farbskala gezeichneter Symbole={:d}".format(logStr,pN_mid_Anz))
pcN_bot=ax.scatter(
pN_bot[kwds['pXCor']],pN_bot[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_bot[kwds['pAttribute']]
,color=kwds['pMCatBotColor']
,alpha=kwds['pMCatBotAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatBotClip'])
logger.debug("{:s}Anzahl mit fester Farbe Bot gezeichneter Symbole={:d}".format(logStr,pN_bot_Anz))
else:
pN_Anz,col=pDf.shape
if not kwds['CBFixedLimits']:
vmin=pDf[kwds['pMeasure']].min()
vmax=pDf[kwds['pMeasure']].max()
else:
vmin=kwds['CBFixedLimitLow']
vmax=kwds['CBFixedLimitHigh']
pcN=ax.scatter(
pDf[kwds['pXCor']],pDf[kwds['pYCor']]
,s=kwds['pSizeFactor']*pDf[kwds['pAttribute']]
# Farbskala
,cmap=kwds['pMeasureColorMap']
# Normierung Farbe
,vmin=vmin
,vmax=vmax
# Farbwert
,c=pDf[kwds['pMeasure']]
,alpha=kwds['pMeasureAlpha']
,edgecolors='face'
,clip_on=kwds['pMeasureClip']
)
logger.debug("{:s}Anzahl mit Farbskala gezeichneter Symbole={:d}".format(logStr,pN_Anz))
logger.debug("{:s}Farbskala vmin={:10.3f} Farbskala vmax={:10.3f}".format(logStr,vmin,vmax))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return (pcN, vmin, vmax)
def pltNetPipes(pDf,**kwds):
"""
Plots Lines with Marker on gca().
Args:
pDf: dataFrame
PIPE-Line:
* pAttribute: column in pDf (default: 'Attribute')
* pAttributeLs (default: '-')
* pAttributeSizeFactor: plot linewidth in pts = pAttributeSizeFactor (default: 1.0) * Attribute
* pAttributeSizeMin (default: None): if set: use pAttributeSizeMin-Value as Attribute for LineSize if Attribute < pAttributeSizeMin
* pAttributeColorMap (default: plt.cm.binary)
* pAttributeColorMapUsageStart (default: 1./3; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
PIPE-Marker:
* pMeasure: column in pDf (default: 'Measure')
* pMeasureMarker (default: '.')
* pMeasureSizeFactor: plot markersize in pts = pMeasureSizeFactor (default: 1.0) * Measure
* pMeasureSizeMin (default: None): if set: use pMeasureSizeMin-Value as Measure for MarkerSize if Measure < pMeasureSizeMin
* pMeasureColorMap (default: plt.cm.cool)
* pMeasureColorMapUsageStart (default: 0.; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
PIPE:
* pWAYPXCors: column in pDf (default: 'pWAYPXCors')
* pWAYPYCors: column in pDf (default: 'pWAYPYCors')
* pClip (default: False)
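Beispiel (Skizze mit frei gewählten Beispieldaten; pWAYPXCors/pWAYPYCors enthalten je Rohr die Liste der Wegpunktkoordinaten):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> plt.close('all')
>>> pDf=pd.DataFrame({'Attribute':[1.,2.],'Measure':[10.,20.],'pWAYPXCors':[[0.,1.],[1.,2.]],'pWAYPYCors':[[0.,1.],[1.,0.]]})
>>> Rm.pltNetPipes(pDf)
>>> plt.close()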
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# PIPE-Line
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pAttributeSizeFactor' not in keys:
kwds['pAttributeSizeFactor']=1.
if 'pAttributeSizeMin' not in keys:
kwds['pAttributeSizeMin']=None
if 'pAttributeLs' not in keys:
kwds['pAttributeLs']='-'
if 'pAttributeColorMap' not in keys:
kwds['pAttributeColorMap']=plt.cm.binary
if 'pAttributeColorMapUsageStart' not in keys:
kwds['pAttributeColorMapUsageStart']=1./3.
# PIPE-Marker
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureSizeFactor' not in keys:
kwds['pMeasureSizeFactor']=1.
if 'pMeasureSizeMin' not in keys:
kwds['pMeasureSizeMin']=None
if 'pMeasureMarker' not in keys:
kwds['pMeasureMarker']='.'
if 'pMeasureColorMap' not in keys:
kwds['pMeasureColorMap']=plt.cm.cool
if 'pMeasureColorMapUsageStart' not in keys:
kwds['pMeasureColorMapUsageStart']=0.
# PIPE
if 'pWAYPXCors' not in keys:
kwds['pWAYPXCors']='pWAYPXCors'
if 'pWAYPYCors' not in keys:
kwds['pWAYPYCors']='pWAYPYCors'
if 'pClip' not in keys:
kwds['pClip']=False
except:
pass
try:
# Line
minLine=pDf[kwds['pAttribute']].min()
maxLine=pDf[kwds['pAttribute']].max()
logger.debug("{:s}minLine (Attribute): {:6.2f}".format(logStr,minLine))
logger.debug("{:s}maxLine (Attribute): {:6.2f}".format(logStr,maxLine))
normLine=colors.Normalize(minLine,maxLine)
usageLineValue=minLine+kwds['pAttributeColorMapUsageStart']*(maxLine-minLine)
usageLineColor=kwds['pAttributeColorMap'](normLine(usageLineValue))
# Marker
minMarker=pDf[kwds['pMeasure']].min()
maxMarker=pDf[kwds['pMeasure']].max()
logger.debug("{:s}minMarker (Measure): {:6.2f}".format(logStr,minMarker))
logger.debug("{:s}maxMarker (Measure): {:6.2f}".format(logStr,maxMarker))
normMarker=colors.Normalize(minMarker,maxMarker)
usageMarkerValue=minMarker+kwds['pMeasureColorMapUsageStart']*(maxMarker-minMarker)
usageMarkerColor=kwds['pMeasureColorMap'](normMarker(usageMarkerValue))
ax=plt.gca()
for xs,ys,vLine,vMarker in zip(pDf[kwds['pWAYPXCors']],pDf[kwds['pWAYPYCors']],pDf[kwds['pAttribute']],pDf[kwds['pMeasure']]):
if vLine >= usageLineValue:
colorLine=kwds['pAttributeColorMap'](normLine(vLine))
else:
colorLine=usageLineColor
if vMarker >= usageMarkerValue:
colorMarker=kwds['pMeasureColorMap'](normMarker(vMarker))
else:
colorMarker=usageMarkerColor
linewidth=kwds['pAttributeSizeFactor']*vLine
if kwds['pAttributeSizeMin'] != None:
if vLine < kwds['pAttributeSizeMin']:
linewidth=kwds['pAttributeSizeFactor']*kwds['pAttributeSizeMin']
mSize=kwds['pMeasureSizeFactor']*vMarker
if kwds['pMeasureSizeMin'] != None:
if vMarker < kwds['pMeasureSizeMin']:
mSize=kwds['pMeasureSizeFactor']*kwds['pMeasureSizeMin']
pcLines=ax.plot(xs,ys
,color=colorLine
,linewidth=linewidth
,ls=kwds['pAttributeLs']
,marker=kwds['pMeasureMarker']
,mfc=colorMarker
,mec=colorMarker
,mfcalt=colorMarker
,mew=0
,ms=mSize #kwds['pMeasureSizeFactor']*vMarker
,markevery=[0,len(xs)-1]
,aa=True
,clip_on=kwds['pClip']
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetLegendColorbar(pc,pDf,**kwds):
"""
Erzeugt eine Axes cax für den Legendenbereich aus ax (=gca()) und zeichnet auf cax die Farblegende (die Farbskala mit allen Eigenschaften).
Args:
pc: (eingefaerbte) PathCollection (aus pltNetNodes); wird für die Erzeugung der Farbskala zwingend benoetigt
pDf: dataFrame (default: None)
Measure:
* pMeasure: colName in pDf (default: 'Measure')
* pMeasureInPerc: Measure wird interpretiert in Prozent [0-1] (default: True)
* pMeasure3Classes (default: False d.h. Measure wird nicht in 3 Klassen dargestellt)
CBFixedLimits (Ticks):
* CBFixedLimits (default: False d.h. Farbskala nach vorh. min./max. Wert)
* CBFixedLimitLow (default: .10)
* CBFixedLimitHigh (default: .95)
Label:
* pMeasureUNIT (default: '[]')
* pMeasureTYPE (default: '')
CB
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
* CBLabelPad (default: -50)
* CBTicklabelsHPad (default: 0.)
* CBAspect: ratio of long to short dimension (default: 10.)
* CBShrink: fraction by which to shrink the colorbar (default: 0.3)
* CBAnchorHorizontal: horizontaler Fußpunkt der colorbar in Plot-% (default: 0.)
* CBAnchorVertical: vertikaler Fußpunkt der colorbar in Plot-% (default: 0.2)
Return:
cax
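Beispiel (Skizze mit frei gewählten Beispieldaten; pc stammt typischerweise aus pltNetNodes, die Farblegende wird rechts neben gca() angelegt):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> plt.close('all')
>>> pDf=pd.DataFrame({'Attribute':[10.,20.],'Measure':[.2,.8],'pXCor_i':[0.,1.],'pYCor_i':[0.,1.]})
>>> pcN,vmin,vmax=Rm.pltNetNodes(pDf,pMeasure3Classes=False)
>>> cax=Rm.pltNetLegendColorbar(pcN,pDf,pMeasureTYPE='Auslastung')
>>> plt.close()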
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Measure
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureInPerc' not in keys:
kwds['pMeasureInPerc']=True
if 'pMeasure3Classes' not in keys:
kwds['pMeasure3Classes']=False
# Label
if 'pMeasureUNIT' not in keys:
kwds['pMeasureUNIT']='[]'
if 'pMeasureTYPE' not in keys:
kwds['pMeasureTYPE']=''
# CBFixedLimits
if 'CBFixedLimits' not in keys:
kwds['CBFixedLimits']=False
if 'CBFixedLimitLow' not in keys:
kwds['CBFixedLimitLow']=.10
if 'CBFixedLimitHigh' not in keys:
kwds['CBFixedLimitHigh']=.95
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
if 'CBLabelPad' not in keys:
kwds['CBLabelPad']=-50
if 'CBTicklabelsHPad' not in keys:
kwds['CBTicklabelsHPad']=0
if 'CBAspect' not in keys:
kwds['CBAspect']=10.
if 'CBShrink' not in keys:
kwds['CBShrink']=0.3
if 'CBAnchorHorizontal' not in keys:
kwds['CBAnchorHorizontal']=0.
if 'CBAnchorVertical' not in keys:
kwds['CBAnchorVertical']=0.2
except:
pass
try:
ax=plt.gca()
fig=plt.gcf()
# cax
cax=None
cax,kw=make_axes(ax
,location='right'
,fraction=kwds['CBFraction'] # fraction of original axes to use for colorbar
,pad=kwds['CBHpad'] # fraction of original axes between colorbar and new image axes
,anchor=(kwds['CBAnchorHorizontal'],kwds['CBAnchorVertical']) # the anchor point of the colorbar axes
,aspect=kwds['CBAspect'] # ratio of long to short dimension
,shrink=kwds['CBShrink'] # fraction by which to shrink the colorbar
)
# colorbar
colorBar=fig.colorbar(pc
,cax=cax
,**kw
)
# tick Values
if kwds['pMeasure3Classes']: # FixedLimits should be True and FixedLimitHigh/Low should be set ...
minCBtickValue=kwds['CBFixedLimitLow']
maxCBtickValue=kwds['CBFixedLimitHigh']
else:
if kwds['CBFixedLimits'] and isinstance(kwds['CBFixedLimitHigh'],float) and isinstance(kwds['CBFixedLimitLow'],float):
minCBtickValue=kwds['CBFixedLimitLow']
maxCBtickValue=kwds['CBFixedLimitHigh']
else:
minCBtickValue=pDf[kwds['pMeasure']].min()
maxCBtickValue=pDf[kwds['pMeasure']].max()
colorBar.set_ticks([minCBtickValue,minCBtickValue+.5*(maxCBtickValue-minCBtickValue),maxCBtickValue])
# tick Labels
if kwds['pMeasureInPerc']:
if kwds['pMeasure3Classes']:
minCBtickLabel=">{:3.0f}%".format(minCBtickValue*100)
maxCBtickLabel="<{:3.0f}%".format(maxCBtickValue*100)
else:
minCBtickLabel="{:6.2f}%".format(minCBtickValue*100)
maxCBtickLabel="{:6.2f}%".format(maxCBtickValue*100)
else:
if kwds['pMeasure3Classes']:
minCBtickLabel=">{:6.2f}".format(minCBtickValue)
maxCBtickLabel="<{:6.2f}".format(maxCBtickValue)
else:
minCBtickLabel="{:6.2f}".format(minCBtickValue)
maxCBtickLabel="{:6.2f}".format(maxCBtickValue)
logger.debug("{:s}minCBtickLabel={:s} maxCBtickLabel={:s}".format(logStr,minCBtickLabel,maxCBtickLabel))
colorBar.set_ticklabels([minCBtickLabel,'',maxCBtickLabel])
colorBar.ax.yaxis.set_tick_params(pad=kwds['CBTicklabelsHPad'])
# Label
if kwds['pMeasureInPerc']:
CBLabelText="{:s} in [%]".format(kwds['pMeasureTYPE'])
else:
CBLabelText="{:s} in {:s}".format(kwds['pMeasureTYPE'],kwds['pMeasureUNIT'])
colorBar.set_label(CBLabelText,labelpad=kwds['CBLabelPad'])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return cax
def pltNetLegendColorbar3Classes(pDf,**kwds):
"""
Zeichnet auf gca() die ergaenzenden Legendeninformationen bei 3 Klassen.
* scatters the Top-Symbol
* scatters the Bot-Symbol
* the "Mid-Symbol" is the (already existing) colorbar with (already existing) ticks and ticklabels
Args:
pDf: dataFrame
Category:
* pMCategory: colName in pDf (default: 'MCategory')
* pMCatTopText
* pMCatMidText
* pMCatBotText
CBLegend (3Classes) - Parameterization of the representative Symbols
* CBLe3cTopVPad (default: 1+1*1/4)
* CBLe3cMidVPad (default: .5)
* CBLe3cBotVPad (default: 0-1*1/4)
* "1" is the height of the Colorbar
* the VPads (the vertical Sy-Positions) are defined in cax.transAxes Coordinates
* cax is the Colorbar Axes
* CBLe3cSySize=10**2 (Sy-Area in pts^2)
* CBLe3cSyType='o'
Color:
* pMCatBotColor='violet'
* pMCatTopColor='palegreen'
Returns:
(bbTop, bbMid, bbBot): the boundingBoxes of the 3Classes-Symbols
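Beispiel (Skizze mit frei gewählten Beispieldaten; gca() muss hier die Colorbar-Axes cax sein, auf der die ergänzenden Symbole platziert werden):
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> import Rm
>>> plt.close('all')
>>> fig,cax=plt.subplots()
>>> pDf=pd.DataFrame({'MCategory':['Top','Middle','Bottom']})
>>> bbTop,bbMid,bbBot=Rm.pltNetLegendColorbar3Classes(pDf)
>>> plt.close()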
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Cats
if 'pMCategory' not in keys:
kwds['pMCategory']='MCategory'
if 'pMCatTopText' not in keys:
kwds['pMCatTopText']='Top'
if 'pMCatMidText' not in keys:
kwds['pMCatMidText']='Middle'
if 'pMCatBotText' not in keys:
kwds['pMCatBotText']='Bottom'
# CBLegend3Cats
if 'CBLe3cTopVPad' not in keys:
kwds['CBLe3cTopVPad']=1+1*1/4
if 'CBLe3cMidVPad' not in keys:
kwds['CBLe3cMidVPad']=.5
if 'CBLe3cBotVPad' not in keys:
kwds['CBLe3cBotVPad']=0-1*1/4
if 'CBLe3cSySize' not in keys:
kwds['CBLe3cSySize']=10**2
if 'CBLe3cSyType' not in keys:
kwds['CBLe3cSyType']='o'
# CatAttribs
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
except:
pass
try:
cax=plt.gca()
pDf_top=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatTopText'])]
pDf_mid=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatMidText'])]
pDf_bot=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatBotText'])]
pDf_top_Anz,col=pDf_top.shape
pDf_mid_Anz,col=pDf_mid.shape
pDf_bot_Anz,col=pDf_bot.shape
logger.debug("{:s} pDf_bot_Anz={:d} pDf_mid_Anz={:d} pDf_top_Anz={:d}".format(logStr,pDf_bot_Anz,pDf_mid_Anz,pDf_top_Anz))
logger.debug("{:s} CBLe3cBotVPad={:f} CBLe3cMidVPad={:f} CBLe3cTopVPad={:f}".format(logStr,kwds['CBLe3cBotVPad'],kwds['CBLe3cMidVPad'],kwds['CBLe3cTopVPad']))
bbBot=None
bbMid=None
bbTop=None
if pDf_bot_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cBotVPad']
,s=kwds['CBLe3cSySize']
,c=kwds['pMCatBotColor']
,alpha=0.9
,edgecolors='face'
,clip_on=False
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
# Text dazu
o=po.findobj(match=None)
p=o[0]
bbBot=p.get_datalim(cax.transAxes)
logger.debug("{:s} bbBot={!s:s}".format(logStr,bbBot))
# a=plt.annotate(pMCatBotText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cBotVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatBotColor
# )
# # weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_bot_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cBotVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatBotColor
# )
if pDf_top_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cTopVPad']
,s=kwds['CBLe3cSySize']
,c=kwds['pMCatTopColor']
,alpha=0.9
,edgecolors='face'
,clip_on=False
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
o=po.findobj(match=None)
p=o[0]
bbTop=p.get_datalim(cax.transAxes)
# #Text dazu
# o=po.findobj(match=None)
# p=o[0]
# bb=p.get_datalim(cax.transAxes)
# bbTop=bb
# a=plt.annotate(pMCatTopText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cTopVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatTopColor
# )
# #weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_top_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad++CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cTopVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatTopColor
# )
if pDf_mid_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cMidVPad']
,s=kwds['CBLe3cSySize']
,c='lightgrey'
,alpha=0.9
,edgecolors='face'
,clip_on=False
,visible=False # es werden nur die Koordinaten benötigt
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
o=po.findobj(match=None)
p=o[0]
bbMid=p.get_datalim(cax.transAxes)
# #Text dazu
# o=po.findobj(match=None)
# p=o[0]
# bb=p.get_datalim(cax.transAxes)
# a=plt.annotate(pMCatMidText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cMidVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatMidColor
# ,visible=False
# )
# #weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_mid_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cMidVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatMidColor
# ,visible=False
# )
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return (bbTop, bbMid, bbBot)
def pltNetLegendTitleblock(text='',**kwds):
"""
Zeichnet auf gca() ergaenzende Schriftfeldinformationen.
Args:
text
Parametrierung:
* anchorVertical
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
if 'anchorVertical' not in keys:
kwds['anchorVertical']=1.
except:
pass
cax=plt.gca()
try:
a=plt.text( 0.
,kwds['anchorVertical']
,text
,transform=cax.transAxes
,family='monospace'
,size='smaller'
,rotation='vertical'
,va='bottom'
,ha='left'
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetTextblock(text='',**kwds):
"""
Zeichnet einen Textblock auf gca().
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
if 'x' not in keys:
kwds['x']=0.
if 'y' not in keys:
kwds['y']=1.
except:
pass
ax=plt.gca()
try:
a=plt.text( kwds['x']
,kwds['y']
,text
,transform=ax.transAxes
,family='monospace'
,size='smaller'
,rotation='horizontal'
,va='bottom'
,ha='left'
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
class Rm():
@classmethod
def pltNetPipes(cls,pDf,**kwds):
"""
Plots colored PIPES.
Args:
    DATA:
        pDf: dataFrame
        * query: query to filter pDf; default: None; Exp.: ="CONT_ID == '1001'"
        * fmask: function to filter pDf; default: None; Exp.: =lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
        * query and fmask are both used (query 1st) if not None
        * sort_values_by: list of colNames defining the plot order; default: None (i.e. the plot order - and therefore the z-order - is then the pDf order)
        * sort_values_ascending; default: False (i.e. small values last, so that (if pAttrLineSize = pAttribute/pAttributeFunc) thin lines end up on top of thick ones); only relevant together with sort_values_by
    AXES:
        pAx: Axes to be plotted on; if not specified: gca() is used
    Colorlegend:
        * CBFraction in % (default: 5)
        * CBHpad (default: 0.05)
        * CBLabel (default: pAttribute/pAttributeFunc)
        * CBBinTicks (default: None, i.e. no external specification); a value N requests N yTicks; for discrete CMs this means N-1 discrete categories
        * CBBinDiscrete (default: False, i.e. a given (continuous) CM is not converted into a discrete one)
        * if CBBinDiscrete, N from CBBinTicks is used for the ticks (resp. categories); if CBBinTicks is undefined, 4 is used (i.e. 3 categories)
        * these categories are an equidistant subdivision of the defined value range
        * CBBinBounds (default: None): if the CM is a discrete one, the given BoundaryNorm is applied; CBBinTicks is then meaningless
        * CBTicks: individually specified ticks; processed last, i.e. the ticks existing at that point (possibly already manipulated via CBBinTicks resp. <=/>= and v=/^=) ...
        * ... are overwritten; can be used without CBTickLabels
        * CBTickLabels: individually specified tick labels; processed afterwards; the length must match the ticks existing at that point; can also be used without CBTicks
    PIPE-Attribute:
        * pAttribute: column in pDf (default: 'Attribute')
        * pAttributeFunc:
            * function to be used to construct a new col to be plotted
            * if pAttributeFunc is not None pAttribute is not used: pAttribute is set to 'pAttributeFunc'
            * the newly constructed col is named 'pAttributeFunc'; this name can be used in sort_values_by
    PIPE-Color:
        * pAttributeColorMap (default: plt.cm.cool)
        * colormap scaling:
        * ------------------
        * pAttributeColorMapMin (default: pAttribute.min()); assigns a value to the smallest color; CM: if specified _and undershot: <=
        * pAttributeColorMapMax (default: pAttribute.max()); assigns a value to the largest color; CM: if specified _and exceeded: >=
        * default: the colormap is used in full; i.e. the value range (possibly restricted via Min/Max) is mapped onto the edge colors of the scale
        * if a different, smaller, value range is plotted with the same colormap, the colors of the plots are not comparable ...
        * ... if color comparability is required, the same colormap must not be used in full
        * pAttributeColorMapUsageStart (default: 0.; value range: [0,1))
        * here: the lower end of the colormap is only used from UsageStart on ...
        * ... i.e. values that would get a "smaller" color get the color of UsageStart; CM: v=
        * pAttributeColorMapUsageEnd (default: 1.; value range: (0,1])
        * here: the upper end of the colormap is only used up to UsageEnd ...
        * ... i.e. values that would get a "larger" color get the color of UsageEnd; CM: ^=
        * this is different from not using a colormap fully at its edges because the colors there are unwanted ...
        * (a small sketch of this truncation follows after this argument list)
    PIPE-Color 2nd:
        * to "dim" "unimportant" areas; examples:
        * spatially: areas outside the cut; existing network (2nd) vs. extension; unimportant zones (2nd) vs. important zones; OK (2nd) vs. NOK
        * the 2nd-color pipes are drawn first; the (1st-)color pipes are drawn afterwards, i.e. they lie "on top of" the "unimportant" ones
        * the same column pAttribute/pAttributeFunc is used for the 2nd colormap
        * the same linestyle (pAttributeLs) is used for the 2nd colormap
        * the same width pAttrLineSize (pAttribute/pAttributeFunc) is used for the 2nd colormap
        * only the colormap differs, and possibly the colormap scaling
        * pAttributeColorMapFmask: function to filter pDf to decide to plot with colorMap; default: =lambda row: True
        * pAttributeColorMap2ndFmask: function to filter pDf to decide to plot with colorMap2nd; default: =lambda row: False
        * the two function masks allow a filtering in addition to query and fmask
        * the function masks should be disjoint; if they are not: 2nd overwrites
        * pAttributeColorMap2nd (default: plt.cm.binary)
        * colormap scaling:
        * ------------------
        * pAttributeColorMap2ndMin (default: pAttributeColorMapMin)
        * pAttributeColorMap2ndMax (default: pAttributeColorMapMax)
        * the colormap is not used in full at its edges if the colors there are unwanted:
        * pAttributeColorMap2ndUsageStart (default: 0.; value range: [0,1))
        * pAttributeColorMap2ndUsageEnd (default: 1.; value range: (0,1])
    PIPE-Linestyle:
        * pAttributeLs (default: '-')
        * same for all colors if multiple colors are specified
    PIPE-Linesize:
        * pAttrLineSize: column in pDf; if not specified: pAttribute/pAttributeFunc
        * pAttrLineSizeFactor (>0): plot linewidth in pts = pAttrLineSizeFactor (default: =...) * fabs(pAttrLineSize)
        * ...: 1./(pDf[pAttrLineSize].std()*2.)
        * same for all colors if multiple colors are specified
    PIPE-Geometry:
        * pWAYPXCors: column in pDf (default: 'pWAYPXCors')
        * pWAYPYCors: column in pDf (default: 'pWAYPYCors')
        * pClip (default: True)
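    Example (illustrative sketch, not a doctest): how the UsageStart/UsageEnd truncation works internally.
        The 0.2/0.8 values below are made-up assumptions; only the middle 60% of the colormap is kept,
        mirroring what pAttributeColorMapUsageStart=0.2 / pAttributeColorMapUsageEnd=0.8 would do:

            import numpy as np
            import matplotlib.pyplot as plt
            import matplotlib.colors as colors
            cMap = plt.cm.get_cmap('cool')
            truncCMap = colors.LinearSegmentedColormap.from_list(
                'trunc(cool,0.20,0.80)', cMap(np.linspace(0.2, 0.8, 100)))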
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import math
>>> # ---
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> xm=xms['DHNetwork']
>>> #mx=mxs['DHNetwork']
>>> # ---
>>> plt.close()
>>> size_DINA3quer=(16.5, 11.7)
>>> dpiSize=72
>>> fig=plt.figure(figsize=size_DINA3quer,dpi=dpiSize)
>>> gs = gridspec.GridSpec(4, 2)
>>> # ---
>>> vROHR=xm.dataFrames['vROHR']
>>> # ---
>>> # Attribute (with neg. Values)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttribute='ROHR~*~*~*~QMAV'
... )
>>> txt=axNfd.set_title('RL QMAV')
>>> # ---
>>> # Function as Attribute
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[1])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs')
>>> # --------------------------
>>> # ---
>>> # Mi/MaD zS auf
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[2])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1600.
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... )
>>> txt=axNfd.set_title('Mi/MaD zS auf')
>>> # --------------------------
>>> # ---
>>> # ind. Kategorien
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[3])
>>> cm = matplotlib.colors.ListedColormap(['cyan', 'royalblue', 'magenta', 'coral'])
>>> cm.set_over('0.25')
>>> cm.set_under('0.75')
>>> bounds = [10.,100.,200.,800.,1600.]
>>> norm = matplotlib.colors.BoundaryNorm(bounds, cm.N)
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMap=cm
... ,CBBinBounds=bounds
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... )
>>> txt=axNfd.set_title('ind. Kategorien')
>>> # --------------------------
>>> # ---
>>> # Unwichtiges ausblenden über 2nd Color
>>> # --------------------------
>>> vAGSN=xm.dataFrames['vAGSN']
>>> hpRL=vAGSN[(vAGSN['LFDNR']=='1') & (vAGSN['Layer']==2)]
>>> pDf=pd.merge(vROHR
... ,hpRL[hpRL.IptIdx=='S'] # wg. Innenpunkte
... ,how='left'
... ,left_on='pk'
... ,right_on='OBJID'
... ,suffixes=('','_AGSN')).filter(items=vROHR.columns.tolist()+['OBJID'])
>>> axNfd = fig.add_subplot(gs[4])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinTicks=7
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... )
>>> txt=axNfd.set_title('Unwichtiges ausblenden über 2nd Color')
>>> # --------------------------
>>> # ---
>>> # Farbskalen an den Rändern abschneiden
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[5])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... ,pAttributeColorMapUsageStart=3/15.
... ,pAttributeColorMapUsageEnd=12/15.
... )
>>> txt=axNfd.set_title('Farbskalen an den Rändern abschneiden')
>>> # --------------------------
>>> # ---
>>> # Farbskala diskretisieren
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[6])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinDiscrete=True
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... ,CBTicks=[250,750,1250]
... ,CBTickLabels=['klein','mittel','groß']
... )
>>> txt=axNfd.set_title('Farbskala diskretisieren')
>>> # --------------------------
>>> # ---
>>> # Unterkategorien
>>> # --------------------------
>>> baseColorsDef="tab10"
>>> catagoryColors=[9,6,1]
>>> nOfSubCatsReq=4
>>> cm=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> axNfd = fig.add_subplot(gs[7])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMap=cm
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinTicks=16
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... )
>>> txt=axNfd.set_title('Unterkategorien')
>>> # --------------------------
>>> gs.tight_layout(fig)
>>> plt.show()
>>> plt.savefig('pltNetPipes.pdf',format='pdf',dpi=dpiSize*2)
>>> # -----
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=dpiSize)
>>> gs = gridspec.GridSpec(1, 1)
>>> # ---
>>> #
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' and row.LTGR_NAME=='NWDUF2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs (Ausschnitt)')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> # -----
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3,dpi=dpiSize)
>>> gs = gridspec.GridSpec(1, 1)
>>> # ---
>>> #
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' and row.LTGR_NAME=='NWDUF2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs (Ausschnitt)')
>>> gs.tight_layout(fig)
>>> plt.show()
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=5 # in %
if 'CBHpad' not in keys:
kwds['CBHpad']=0.05
if 'CBLabel' not in keys:
kwds['CBLabel']=None
# CB / Farbskala
if 'CBBinTicks' not in keys:
kwds['CBBinTicks']=None
if 'CBBinDiscrete' not in keys:
kwds['CBBinDiscrete']=False
if kwds['CBBinDiscrete']:
if kwds['CBBinTicks']==None:
kwds['CBBinTicks']=4 # (d.h. 3 Kategorien)
if 'CBBinBounds' not in keys:
kwds['CBBinBounds']=None
# customized yTicks
if 'CBTicks' not in keys:
kwds['CBTicks'] = None
if 'CBTickLabels' not in keys:
kwds['CBTickLabels'] = None
# DATA
if 'query' not in keys:
kwds['query']=None # Exp.: = "KVR_i=='2' & KVR_k=='2'"
if 'fmask' not in keys:
kwds['fmask']=None # Exp.: =lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
if 'sort_values_by' not in keys:
kwds['sort_values_by']=None
if 'sort_values_ascending' not in keys:
kwds['sort_values_ascending']=False
# PIPE-Attribute
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pAttributeFunc' not in keys:
logger.debug("{:s}pAttribute: not specified?! 'Attribute' will be used. pAttributeFunc is also not specified?!".format(logStr))
if 'pAttributeFunc' not in keys:
kwds['pAttributeFunc']=None
# PIPE-Color
if 'pAttributeColorMap' not in keys:
kwds['pAttributeColorMap']=plt.cm.cool
if 'pAttributeColorMapMin' not in keys:
kwds['pAttributeColorMapMin']=None
if 'pAttributeColorMapMax' not in keys:
kwds['pAttributeColorMapMax']=None
# Trunc Cmap
if 'pAttributeColorMapUsageStart' not in keys and 'pAttributeColorMapUsageEnd' not in keys:
kwds['pAttributeColorMapTrunc']=False
else:
kwds['pAttributeColorMapTrunc']=True
if 'pAttributeColorMapUsageStart' not in keys:
kwds['pAttributeColorMapUsageStart']=0.
if 'pAttributeColorMapUsageEnd' not in keys:
kwds['pAttributeColorMapUsageEnd']=1.
# PIPE-Color 1st/2nd - FMasks
if 'pAttributeColorMapFmask' not in keys:
kwds['pAttributeColorMapFmask']=lambda row: True
else:
logger.debug("{:s}Color 1st-PIPEs are filtered with fmask: {:s} ...".format(logStr,str(kwds['pAttributeColorMapFmask'])))
if 'pAttributeColorMap2ndFmask' not in keys:
kwds['pAttributeColorMap2ndFmask']=lambda row: False
else:
logger.debug("{:s}Color 2nd-PIPEs are filtered with fmask: {:s} ...".format(logStr,str(kwds['pAttributeColorMap2ndFmask'])))
# PIPE-Color 2nd
if 'pAttributeColorMap2nd' not in keys:
kwds['pAttributeColorMap2nd']=plt.cm.binary
if 'pAttributeColorMap2ndMin' not in keys:
kwds['pAttributeColorMap2ndMin']=kwds['pAttributeColorMapMin']
if 'pAttributeColorMap2ndMax' not in keys:
kwds['pAttributeColorMap2ndMax']=kwds['pAttributeColorMapMax']
# Trunc Cmap
if 'pAttributeColorMap2ndUsageStart' not in keys and 'pAttributeColorMap2ndUsageEnd' not in keys:
kwds['pAttributeColorMap2ndTrunc']=False
else:
kwds['pAttributeColorMap2ndTrunc']=True
if 'pAttributeColorMap2ndUsageStart' not in keys:
kwds['pAttributeColorMap2ndUsageStart']=0.
if 'pAttributeColorMap2ndUsageEnd' not in keys:
kwds['pAttributeColorMap2ndUsageEnd']=1.
# PIPE-Linestyle
if 'pAttributeLs' not in keys:
kwds['pAttributeLs']='-'
# PIPE-Linesize
if 'pAttrLineSize' not in keys:
kwds['pAttrLineSize']=None
if 'pAttrLineSizeFactor' not in keys:
kwds['pAttrLineSizeFactor']=None
# PIPE-Geometry
if 'pWAYPXCors' not in keys:
kwds['pWAYPXCors']='pWAYPXCors'
if 'pWAYPYCors' not in keys:
kwds['pWAYPYCors']='pWAYPYCors'
if 'pClip' not in keys:
kwds['pClip']=True
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
try:
# filter if requested (query and/or fmask)
if kwds['query'] != None:
logger.debug("{:s}pDf is filtered with query: {:s} ...".format(logStr,str(kwds['query'])))
pDf=pd.DataFrame(pDf.query(kwds['query']).values,columns=pDf.columns)
if kwds['fmask'] != None:
logger.debug("{:s}pDf is filtered with fmask: {:s} ...".format(logStr,str(kwds['fmask'])))
pDf=pd.DataFrame(pDf[pDf.apply(kwds['fmask'],axis=1)].values,columns=pDf.columns)
# if columns to be plotted have to be (re)calculated or the plot order is to be changed: create a copy
if kwds['pAttributeFunc'] != None or kwds['sort_values_by'] != None:
# Kopie!
logger.debug("{:s}pDf is copied ...".format(logStr))
pDf=pDf.copy(deep=True)
# ggf. zu plottende Spalte(n) neu ausrechnen
if kwds['pAttributeFunc'] != None:
logger.debug("{:s}pAttribute: col '{:s}' is not used: ...".format(logStr,kwds['pAttribute']))
logger.debug("{:s}... pAttributeFunc {:s} is used to calculate a new col named 'pAttributeFunc'".format(logStr,str(kwds['pAttributeFunc'])))
pDf['pAttributeFunc']=pDf.apply(kwds['pAttributeFunc'],axis=1)
kwds['pAttribute']='pAttributeFunc'
logger.debug("{:s}col '{:s}' is used as Attribute.".format(logStr,kwds['pAttribute']))
# label for the colorbar (CB)
if kwds['CBLabel'] == None:
kwds['CBLabel']=kwds['pAttribute']
# Spalte für Liniendicke ermitteln
if kwds['pAttrLineSize'] == None:
kwds['pAttrLineSize']=kwds['pAttribute']
logger.debug("{:s}col '{:s}' is used as LineSize.".format(logStr,kwds['pAttrLineSize']))
# Liniendicke skalieren
if kwds['pAttrLineSizeFactor']==None:
kwds['pAttrLineSizeFactor']=1./(pDf[kwds['pAttrLineSize']].std()*2.)
logger.debug("{:s}Faktor Liniendicke: {:12.6f} - eine Linie mit Attributwert {:6.2f} wird in {:6.2f} Pts Dicke geplottet.".format(logStr
,kwds['pAttrLineSizeFactor']
,pDf[kwds['pAttrLineSize']].std()*2.
,kwds['pAttrLineSizeFactor']*pDf[kwds['pAttrLineSize']].std()*2.
))
logger.debug("{:s}min. Liniendicke: Attributwert {:9.2f} Pts: {:6.2f}.".format(logStr
,math.fabs(pDf[kwds['pAttrLineSize']].min())
,kwds['pAttrLineSizeFactor']*math.fabs(pDf[kwds['pAttrLineSize']].min()))
)
logger.debug("{:s}max. Liniendicke: Attributwert {:9.2f} Pts: {:6.2f}.".format(logStr
,math.fabs(pDf[kwds['pAttrLineSize']].max())
,kwds['pAttrLineSizeFactor']*math.fabs(pDf[kwds['pAttrLineSize']].max()))
)
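# Note (sketch): with the default factor 1/(2*std), an attribute value equal to twice the
# standard deviation of pAttrLineSize is drawn 1 pt wide; e.g. std=100 gives factor 1/200,
# so a value of 200 plots as a 1 pt line and a value of 600 as a 3 pt line.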
# change the plot order if requested
if kwds['sort_values_by'] != None:
logger.debug("{:s}pDf is sorted (=Plotreihenfolge) by {:s} ascending={:s}.".format(logStr,str(kwds['sort_values_by']),str(kwds['sort_values_ascending'])))
pDf.sort_values(by=kwds['sort_values_by'],ascending=kwds['sort_values_ascending'],inplace=True)
# ----------------------------------------------------------------------------------------------------------------------------------------
# x,y axes: determine and set the limits (setting them affects ticks and data_ratio; without it both would stay at their default values)
# ----------------------------------------------------------------------------------------------------------------------------------------
xMin=923456789
yMin=923456789
xMax=0
yMax=0
for xs,ys in zip(pDf[kwds['pWAYPXCors']],pDf[kwds['pWAYPYCors']]):
xMin=min(xMin,min(xs))
yMin=min(yMin,min(ys))
xMax=max(xMax,max(xs))
yMax=max(yMax,max(ys))
logger.debug("{:s}pWAYPXCors: {:s} Min: {:6.2f} Max: {:6.2f}".format(logStr,kwds['pWAYPXCors'],xMin,xMax))
logger.debug("{:s}pWAYPYCors: {:s} Min: {:6.2f} Max: {:6.2f}".format(logStr,kwds['pWAYPYCors'],yMin,yMax))
dx=xMax-xMin
dy=yMax-yMin
dxdy=dx/dy
dydx=1./dxdy
# usually "non-round" limits (mpl nevertheless usually determines "round" ticks)
kwds['pAx'].set_xlim(xMin,xMax)
kwds['pAx'].set_ylim(yMin,yMax)
# ----------------------------------------------------------------------------------------------------------------------------------------
# x,y axes: determine the ticks but do NOT change them -----------------------------------------------------------------
# even with "non-round" limits matplotlib usually sets "round" ticks
# ----------------------------------------------------------------------------------------------------------------------------------------
# determine the ticks
xTicks=kwds['pAx'].get_xticks()
yTicks=kwds['pAx'].get_yticks()
dxTick = xTicks[1]-xTicks[0]
xTickSpan=xTicks[-1]-xTicks[0]
dyTick = yTicks[1]-yTicks[0]
yTickSpan=yTicks[-1]-yTicks[0]
logger.debug("{:s}xTicks : {:s} dx: {:6.2f}".format(logStr,str(xTicks),dxTick))
logger.debug("{:s}yTicks : {:s} dy: {:6.2f}".format(logStr,str(yTicks),dyTick))
# dTick gleich setzen (deaktiviert)
if dyTick == dxTick:
pass # nichts zu tun
elif dyTick > dxTick:
# dyTick zu dxTick (kleinere) setzen
dTickW=dxTick
# erf. Anzahl
numOfTicksErf=math.floor(dy/dTickW)+1
newTicks=[idx*dTickW+yTicks[0] for idx in range(numOfTicksErf)]
#kwds['pAx'].set_yticks(newTicks)
#yTicks=kwds['pAx'].get_yticks()
#dyTick = yTicks[1]-yTicks[0]
#logger.debug("{:s}yTicks NEU: {:s} dy: {:6.2f}".format(logStr,str(yTicks),dyTick))
else:
# dxTick zu dyTick (kleinere) setzen
dTickW=dyTick
# erf. Anzahl
numOfTicksErf=math.floor(dx/dTickW)+1
newTicks=[idx*dTickW+xTicks[0] for idx in range(numOfTicksErf)]
#kwds['pAx'].set_xticks(newTicks)
#xTicks=kwds['pAx'].get_xticks()
#dxTick = xTicks[1]-xTicks[0]
#logger.debug("{:s}xTicks NEU: {:s} dx: {:6.2f}".format(logStr,str(xTicks),dxTick))
# ----------------------------------------------------------------------------------------------------------------------------------------
# grid and aspect
# ----------------------------------------------------------------------------------------------------------------------------------------
kwds['pAx'].grid()
kwds['pAx'].set_aspect(aspect='equal') # zur Sicherheit; andere als verzerrungsfreie Darstellungen machen im Netz kaum Sinn
kwds['pAx'].set_adjustable('box')
kwds['pAx'].set_anchor('SW')
## x,y-Seitenverhältnisse ermitteln ---------------------------------------------------------------------------
## total figure size
#figW, figH = kwds['pAx'].get_figure().get_size_inches()
## Axis pos. on figure
#x0, y0, w, h = kwds['pAx'].get_position().bounds
## Ratio of display units
#disp_ratio = (figH * h) / (figW * w)
#disp_ratioA = (figH) / (figW )
#disp_ratioB = (h) / (w)
## Ratio of data units
#data_ratio=kwds['pAx'].get_data_ratio()
#logger.debug("{:s}figW: {:6.2f} figH: {:6.2f}".format(logStr,figW,figH))
#logger.debug("{:s}x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
#logger.debug("{:s}pWAYPCors: Y/X: {:6.2f}".format(logStr,dydx))
#logger.debug("{:s}Ticks: Y/X: {:6.2f}".format(logStr,yTickSpan/xTickSpan))
#logger.debug("{:s}disp_ratio: {:6.2f} data_ratio: {:6.2f}".format(logStr,disp_ratio,data_ratio))
#logger.debug("{:s}disp_ratioA: {:6.2f} disp_ratioB: {:6.2f}".format(logStr,disp_ratioA,disp_ratioB))
# PIPE-Color: colormap scaling:
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['CBBinDiscrete'] and hasattr(cMap,'from_list'): # diskrete Farbskala aus kontinuierlicher erzeugen
N=kwds['CBBinTicks']-1
color_list = cMap(np.linspace(0, 1, N))
cmap_name = cMap.name + str(N)
kwds['pAttributeColorMap']=cMap.from_list(cmap_name, color_list, N)
minAttr=pDf[kwds['pAttribute']].min()
maxAttr=pDf[kwds['pAttribute']].max()
if kwds['pAttributeColorMapMin'] != None:
minLine=kwds['pAttributeColorMapMin']
else:
minLine=minAttr
if kwds['pAttributeColorMapMax'] != None:
maxLine=kwds['pAttributeColorMapMax']
else:
maxLine=maxAttr
logger.debug("{:s}Attribute: minLine (used for CM-Scaling): {:8.2f} min (Data): {:8.2f}".format(logStr,minLine,minAttr))
logger.debug("{:s}Attribute: maxLine (used for CM-Scaling): {:8.2f} max (Data): {:8.2f}".format(logStr,maxLine,maxAttr))
# Norm
normLine=colors.Normalize(minLine,maxLine)
# truncated continuous colormap: adjust colormap and norm
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['pAttributeColorMapTrunc'] and hasattr(cMap,'from_list'):
#
usageStartLineValue=minLine+kwds['pAttributeColorMapUsageStart']*(maxLine-minLine)
usageStartLineColor=kwds['pAttributeColorMap'](normLine(usageStartLineValue))
logger.debug("{:s}pAttributeColorMapUsageStart: {:6.2f} ==> usageStartLineValue: {:8.2f} (minLine: {:8.2f}) color: {:s}".format(logStr
,kwds['pAttributeColorMapUsageStart']
,usageStartLineValue,minLine,str(usageStartLineColor)))
#
usageEndLineValue=maxLine-(1.-kwds['pAttributeColorMapUsageEnd'])*(maxLine-minLine)
usageEndLineColor=kwds['pAttributeColorMap'](normLine(usageEndLineValue))
logger.debug("{:s}pAttributeColorMapUsageEnd: {:6.2f} ==> usageEndLineValue: {:8.2f} (maxLine: {:8.2f}) color: {:s}".format(logStr
,kwds['pAttributeColorMapUsageEnd']
,usageEndLineValue,maxLine,str(usageEndLineColor)))
nColors=100
kwds['pAttributeColorMap'] = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cMap.name, a=kwds['pAttributeColorMapUsageStart'], b=kwds['pAttributeColorMapUsageEnd'])
,cMap(np.linspace(kwds['pAttributeColorMapUsageStart'],kwds['pAttributeColorMapUsageEnd'],nColors)))
normLine=colors.Normalize(max(minLine,usageStartLineValue),min(maxLine,usageEndLineValue))
# discrete colormap with individual categories: adjust the norm
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['CBBinBounds'] != None and not hasattr(cMap,'from_list'): # diskrete Farbskala liegt vor und Bounds sind vorgegeben
normLine = colors.BoundaryNorm(kwds['CBBinBounds'],cMap.N)
#CBPropExtend='both'
CBPropExtend='neither'
else:
CBPropExtend='neither'
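# Example (sketch): with CBBinBounds=[10.,100.,200.,800.,1600.] and a 4-color ListedColormap,
# the BoundaryNorm maps e.g. 50. -> 1st color, 150. -> 2nd, 500. -> 3rd, 1000. -> 4th;
# values below 10. / above 1600. get the colormap's 'under'/'over' colors.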
# PIPE-Color 2nd: colormap scaling:
if kwds['pAttributeColorMap2ndMin'] != None:
minLine2nd=kwds['pAttributeColorMap2ndMin']
else:
minLine2nd=minAttr
if kwds['pAttributeColorMap2ndMax'] != None:
maxLine2nd=kwds['pAttributeColorMap2ndMax']
else:
maxLine2nd=maxAttr
logger.debug("{:s}Attribute: minLine2nd (used for CM-Scaling): {:8.2f} min (Data): {:8.2f}".format(logStr,minLine2nd,minAttr))
logger.debug("{:s}Attribute: maxLine2nd (used for CM-Scaling): {:8.2f} max (Data): {:8.2f}".format(logStr,maxLine2nd,maxAttr))
# Norm
normLine2nd=colors.Normalize(minLine2nd,maxLine2nd)
# truncated continuous colormap: adjust the colormap
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap2nd'])
if kwds['pAttributeColorMap2ndTrunc'] and hasattr(cMap,'from_list'):
#
usageStartLineValue2nd=minLine2nd+kwds['pAttributeColorMap2ndUsageStart']*(maxLine2nd-minLine2nd)
logger.debug("{:s}pAttributeColorMap2ndUsageStart: {:8.2f} ==> usageStartLineValue2nd: {:8.2f} (minLine2nd: {:8.2f})".format(logStr,kwds['pAttributeColorMap2ndUsageStart'],usageStartLineValue2nd,minLine2nd))
usageStartLineColor2nd=kwds['pAttributeColorMap2nd'](normLine2nd(usageStartLineValue2nd))
#
usageEndLineValue2nd=maxLine2nd-(1.-kwds['pAttributeColorMap2ndUsageEnd'])*(maxLine2nd-minLine2nd)
logger.debug("{:s}pAttributeColorMap2ndUsageEnd: {:8.2f} ==> usageEndLineValue2nd: {:8.2f} (maxLine2nd: {:8.2f})".format(logStr,kwds['pAttributeColorMap2ndUsageEnd'],usageEndLineValue2nd,maxLine2nd))
usageEndLineColor2nd=kwds['pAttributeColorMap2nd'](normLine2nd(usageEndLineValue2nd))
nColors=100
kwds['pAttributeColorMap2nd'] = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cMap.name, a=kwds['pAttributeColorMap2ndUsageStart'], b=kwds['pAttributeColorMap2ndUsageEnd'])
,cMap(np.linspace(kwds['pAttributeColorMap2ndUsageStart'],kwds['pAttributeColorMap2ndUsageEnd'],nColors)))
# PIPE-Color 2nd: PLOT
pDfColorMap2nd=pDf[pDf.apply(kwds['pAttributeColorMap2ndFmask'],axis=1)]
(rows ,cols)=pDf.shape
(rows2nd,cols)=pDfColorMap2nd.shape
logger.debug("{:s}Color 2nd-PIPEs: {:d} von {:d}".format(logStr,rows2nd,rows))
for xs,ys,vLine,tLine in zip(pDfColorMap2nd[kwds['pWAYPXCors']],pDfColorMap2nd[kwds['pWAYPYCors']],pDfColorMap2nd[kwds['pAttribute']],pDfColorMap2nd[kwds['pAttrLineSize']]):
#if vLine >= usageStartLineValue2nd and vLine <= usageEndLineValue2nd:
# colorLine=kwds['pAttributeColorMap2nd'](normLine2nd(vLine))
#elif vLine > usageEndLineValue2nd:
# colorLine=usageEndLineColor2nd
#else:
# colorLine=usageStartLineColor2nd
colorLine=kwds['pAttributeColorMap2nd'](normLine2nd(vLine))
pcLines=kwds['pAx'].plot(xs,ys
,color=colorLine
,linewidth=kwds['pAttrLineSizeFactor']*math.fabs(tLine)#(vLine)
,ls=kwds['pAttributeLs']
,solid_capstyle='round'
,aa=True
,clip_on=kwds['pClip']
)
# PIPE-Color: PLOT
pDfColorMap=pDf[pDf.apply(kwds['pAttributeColorMapFmask'],axis=1)]
(rows ,cols)=pDf.shape
(rows1st,cols)=pDfColorMap.shape
colorsCBValues=[]
logger.debug("{:s}Color 1st-PIPEs: {:d} von {:d}".format(logStr,rows1st,rows))
for xs,ys,vLine,tLine in zip(pDfColorMap[kwds['pWAYPXCors']],pDfColorMap[kwds['pWAYPYCors']],pDfColorMap[kwds['pAttribute']],pDfColorMap[kwds['pAttrLineSize']]):
#if vLine >= usageStartLineValue and vLine <= usageEndLineValue:
# colorLine=kwds['pAttributeColorMap'](normLine(vLine))
# value=vLine
#elif vLine > usageEndLineValue:
# colorLine=usageEndLineColor
# value=usageEndLineValue
#else:
# colorLine=usageStartLineColor
# value=usageStartLineValue
colorLine=kwds['pAttributeColorMap'](normLine(vLine))
colorsCBValues.append(vLine)
pcLines=kwds['pAx'].plot(xs,ys
,color=colorLine
,linewidth=kwds['pAttrLineSizeFactor']*math.fabs(tLine)#(vLine)
,ls=kwds['pAttributeLs']
,solid_capstyle='round'
,aa=True
,clip_on=kwds['pClip']
)
# PIPE-Color: plot the pipe start points so that a colorbar can be constructed
xScatter=[]
yScatter=[]
for xs,ys in zip(pDfColorMap[kwds['pWAYPXCors']],pDfColorMap[kwds['pWAYPYCors']]):
xScatter.append(xs[0])
yScatter.append(ys[0])
s=kwds['pAttrLineSizeFactor']*pDfColorMap[kwds['pAttrLineSize']].apply(lambda x: math.fabs(x))
s=s.apply(lambda x: math.pow(x,2)) # https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
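# Note (sketch): scatter sizes 's' are areas in pt^2 while line widths are in pt; squaring
# makes the start marker of a line drawn 3 pt wide get s = 9 pt^2, so the marker is
# roughly as wide as the line it sits on.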
#pcN=kwds['pAx'].scatter(pDfColorMap['pXCor_i'],pDfColorMap['pYCor_i']
pcN=kwds['pAx'].scatter(xScatter,yScatter
,s=s
,linewidth=0 # the linewidth of the marker edges
# Farbskala
,cmap=kwds['pAttributeColorMap']
# Normierung Farbe
,norm=normLine
# Werte
,c=colorsCBValues
,edgecolors='none'
,clip_on=kwds['pClip']
)
# CB: Axes
divider = make_axes_locatable(kwds['pAx'])
cax = divider.append_axes('right',size="{:f}%".format(kwds['CBFraction']),pad=kwds['CBHpad'])
x0, y0, w, h = kwds['pAx'].get_position().bounds
#logger.debug("{:s}ohne Änderung?!: x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
kwds['pAx'].set_aspect(1.) #!
x0, y0, w, h = kwds['pAx'].get_position().bounds
#logger.debug("{:s}ohne Änderung?!: x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
# CB
cB=plt.gcf().colorbar(pcN, cax=cax, orientation='vertical',extend=CBPropExtend,spacing='proportional')
# Label
cB.set_label(kwds['CBLabel'])
# CB Ticks
if kwds['CBBinTicks'] != None:
cB.set_ticks(np.linspace(minLine,maxLine,kwds['CBBinTicks']))
ticks=cB.get_ticks()
try:
ticks=np.unique(np.append(ticks,[usageStartLineValue,usageEndLineValue]))
except:
pass
cB.set_ticks(ticks)
# CB Ticklabels
labels=cB.ax.get_yticklabels()
if kwds['pAttributeColorMapUsageStart'] > 0:
idx=np.where(ticks == usageStartLineValue)
labels[idx[0][0]].set_text(labels[idx[0][0]].get_text()+" v=")
if kwds['pAttributeColorMapUsageEnd'] < 1:
idx=np.where(ticks == usageEndLineValue)
labels[idx[0][0]].set_text(labels[idx[0][0]].get_text()+" ^=")
if kwds['pAttributeColorMapMax'] != None and maxLine<maxAttr:
labels[-1].set_text(labels[-1].get_text()+" >=")
if kwds['pAttributeColorMapMin'] != None and minLine>minAttr:
labels[0].set_text(labels[0].get_text()+" <=")
cB.ax.set_yticklabels(labels)
# customized yTicks --------------------
if kwds['CBTicks'] != None:
cB.set_ticks(kwds['CBTicks'])
if kwds['CBTickLabels'] != None:
labels=cB.ax.get_yticklabels()
if len(labels)==len(kwds['CBTickLabels']):
for label,labelNew in zip(labels,kwds['CBTickLabels']):
label.set_text(labelNew)
cB.ax.set_yticklabels(labels)
else:
logStrFinal="{:s}Error: Anz. CB Ticklabels Ist: {:d} != Anz. Ticklabeles Soll: {:d} ?!".format(logStr,len(labels),len(kwds['CBTickLabels']))
logger.error(logStrFinal)
raise RmError(logStrFinal)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
@classmethod
def pltHP(cls,pDf,**kwds):
"""
Plots a Hydraulic Profile.
Args:
    DATA:
        pDf: dataFrame
            defining the HPLINES (xy-curves) Identification:
                the different HPs in pDf are identified by the two cols
                    NAMECol: default: 'NAME'; set to None if NAMECol is not a criterion for identification ...
                    and
                    LayerCol: default: 'Layer'; set to None if LayerCol is not a criterion for identification ...
                    for each HP several lines (xy-curves) are plotted
                ... not a criterion ...
                    if NAMECol is None only LayerCol is used
                    if LayerCol also is None, all rows are treated as "the" HPLINE
            defining the HPLINES (xy-curves) Geometry:
                * xCol: col in pDf for x; example: 'x'
                  the col is the same for all HPs and all y
                * edgeColSequence: cols to be used for start-node, end-node, next-node; default: ['NAME_i','NAME_k','nextNODE']
                * 'NAME'_'Layer' (e.g. Nord-Süd_1), i.e. NAMECol_LayerCol, is used as an index in hpLineGeoms
                * hpLineGeoms - Example - = {
                    'V-Abzweig_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-3107','matchType':'starts'}
                    }
                    - masterHP: the reference cut
                    - masterNode: must exist in masterHP
                    - masterNode: must also exist in the cut itself for matchType='matches'; for 'starts' the start is mapped; for 'ends' the end
            defining the HPLINES (xy-curves) y-axis types (y-Axes):
                * hpLines: list of cols in pDf for y; example: ['P']
                  each col in hpLines defines a hpLine (a xy-curve) to be plotted
                  for each identified HP all defined hpLines are plotted
            defining the HPLINES (xy-curves) Layout:
                # 'NAME'_'Layer'_'hpLineType' (e.g. Nord-Süd_1_P) is used as an index in hpLineProps
                * hpLineProps - Example - = {
                    'Nord-Süd_1_P':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
                    ,'Nord-Süd_2_P':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
                    }
                if 'NAME'_'Layer'_'hpLine' not in hpLineProps:
                    default props are used
                if hpLineProps['NAME'_'Layer'_'hpLine'] == None:
                    HPLINE is not plotted
    y-axis types (y-Axes):
        * are derived from hpLines
        * the column name - e.g. 'P' - is used as the identifier of the axis type
        * the axes are created in the order in which they occur in hpLines
        * identifiers like 'P','P_1',... are treated as the same axis type 'P' (i.e. the same y-axis)
        * P_1, P_2, ... can e.g. be P at different times or aggregates over time such as Min/Max
        * yAxesDetectionPattern: regExp used to derive the axis types; default: '([\w ]+)(_)(\d+)$'
        * yTwinedAxesPosDeltaHPStart: (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
        * yTwinedAxesPosDeltaHP: (usually negative) additional offset of each further y-axis from the plot area; default: -0.05
    AXES:
        pAx: Axes to be plotted on; if not specified: gca() is used
Return:
    yAxes: dct with AXES; key=y-axis type
    yLines: dct with Line2Ds; key=Index from hpLineProps
    xNodeInfs: dct with node information; key=Index also used in e.g. hpLineGeoms
        key: NAMECol_LayerCol
        value: dct
            key: node
            value: dct
                kwds['xCol']: x in HP
                kwds['xCol']+'Plot': x in HP-Plot
                pDfIdx: Index in pDf
    (a small usage sketch for xNodeInfs follows after this Return section)
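    Example (illustrative sketch, not a doctest): using the returned xNodeInfs to mark node positions.
        The key 'AGFW Symposium DH_1' and the axes name axNfd are assumptions taken from the doctest
        below; any other NAMECol_LayerCol key works the same way.

            for node, inf in xNodeInfs['AGFW Symposium DH_1'].items():
                axNfd.axvline(inf['xPlot'], color='grey', linewidth=0.5)
                axNfd.annotate(node, (inf['xPlot'], axNfd.get_ylim()[0]),
                               rotation=90, fontsize=6)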
>>> # -q -m 0 -s pltHP -y no -z no -w DHNetwork
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import math
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> xm=xms['DHNetwork']
>>> mx=mxs['DHNetwork']
>>> xm.MxAdd(mx=mx,aggReq=['TIME','TMIN','TMAX'],timeReq=3*[mx.df.index[0]],timeReq2nd=3*[mx.df.index[-1]],viewList=['vAGSN'],ForceNoH5Update=True)
>>> vAGSN=xm.dataFrames['vAGSN']
>>> for PH,P,RHO,Z in zip(['PH','PH_1','PH_2'],['P','P_1','P_2'],['RHO','RHO_1','RHO_2'],['Z','Z_1','Z_2']):
... vAGSN[PH]=vAGSN.apply(lambda row: row[P]*math.pow(10.,5.)/(row[RHO]*9.81),axis=1)
... vAGSN[PH]=vAGSN[PH]+vAGSN[Z].astype('float64')
>>> for bBzg,P,RHO,Z in zip(['bBzg','bBzg_1','bBzg_2'],['P','P_1','P_2'],['RHO','RHO_1','RHO_2'],['Z','Z_1','Z_2']):
... vAGSN[bBzg]=vAGSN.apply(lambda row: row[RHO]*9.81/math.pow(10.,5.),axis=1)
... vAGSN[bBzg]=vAGSN[P]+vAGSN[Z].astype('float64')*vAGSN[bBzg]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> yAxes,yLines,xNodeInfs=Rm.Rm.pltHP(vAGSN,pAx=axNfd
... ,hpLines=['bBzg','bBzg_1','bBzg_2','Q']
... ,hpLineProps={
... 'AGFW Symposium DH_1_bBzg':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg_1':{'label':'RL min','color':'blue','linestyle':'-.','linewidth':1}
... ,'AGFW Symposium DH_1_bBzg_2':{'label':'VL max','color':'red' ,'linestyle':'-.','linewidth':1}
... ,'AGFW Symposium DH_1_bBzg_1':None
... ,'AGFW Symposium DH_2_bBzg_2':None
... ,'AGFW Symposium DH_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'AGFW Symposium DH_2_Q':{'label':'RL Q','color':'lightblue','linestyle':'--','linewidth':2}
... }
... )
>>> yAxes.keys()
dict_keys(['bBzg', 'Q'])
>>> yLines.keys()
dict_keys(['AGFW Symposium DH_1_bBzg', 'AGFW Symposium DH_1_bBzg_2', 'AGFW Symposium DH_1_Q', 'AGFW Symposium DH_2_bBzg', 'AGFW Symposium DH_2_bBzg_1', 'AGFW Symposium DH_2_Q'])
>>> txt=axNfd.set_title('HP')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> ###
>>> Rcuts=[
... {'NAME':'R-Abzweig','nl':['R-3107','R-3427']}
... ,{'NAME':'R-EndsTest','nl':['R-HWSU','R-HKW3S']}
... ,{'NAME':'R-MatchesTest','nl':['R-HKW1','R-2104']}
... ]
>>> Vcuts=[
... {'NAME':'V-Abzweig','nl':['V-3107','V-3427']}
... ,{'NAME':'V-EndsTest','nl':['V-HWSU','V-HKW3S']}
... ,{'NAME':'V-MatchesTest','nl':['V-HKW1','V-2104']}
... ]
>>> fV=lambda row: True if row.KVR_i=='1' and row.KVR_k=='1' else False
>>> fR=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
>>> for vcut,rcut in zip(Vcuts,Rcuts):
... ret=xm.vAGSN_Add(nl=vcut['nl'],weight='L',Layer=1,AKTIV=None,NAME=vcut['NAME'],fmask=fV)
... ret=xm.vAGSN_Add(nl=rcut['nl'],weight='L',Layer=2,AKTIV=None,NAME=rcut['NAME'],fmask=fR)
>>> # Schnitte erneut mit Ergebnissen versorgen, da Schnitte neu definiert wurden
>>> xm.MxAdd(mx=mx,ForceNoH5Update=True)
>>> vAGSN=xm.dataFrames['vAGSN']
>>> for PH,P,RHO,Z in zip(['PH'],['P'],['RHO'],['Z']):
... vAGSN[PH]=vAGSN.apply(lambda row: row[P]*math.pow(10.,5.)/(row[RHO]*9.81),axis=1)
... vAGSN[PH]=vAGSN[PH]+vAGSN[Z].astype('float64')
>>> for bBzg,P,RHO,Z in zip(['bBzg'],['P'],['RHO'],['Z']):
... vAGSN[bBzg]=vAGSN.apply(lambda row: row[RHO]*9.81/math.pow(10.,5.),axis=1)
... vAGSN[bBzg]=vAGSN[P]+vAGSN[Z].astype('float64')*vAGSN[bBzg]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> yAxes,yLines,xNodeInfs=Rm.Rm.pltHP(vAGSN[vAGSN['NAME'].isin(['R-Abzweig','V-Abzweig','AGFW Symposium DH','R-EndsTest','V-EndsTest','R-MatchesTest','V-MatchesTest'])],pAx=axNfd
... ,hpLines=['bBzg','Q']
... ,hpLineGeoms={
... 'V-Abzweig_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-3107','matchType':'starts'}
... ,'R-Abzweig_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-3107','matchType':'starts'}
... ,'V-EndsTest_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-HKW3S','matchType':'ends'}
... ,'R-EndsTest_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-HKW3S','matchType':'ends'}
... ,'V-MatchesTest_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-1312','matchType':'matches','offset':-500}
... ,'R-MatchesTest_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-1312','matchType':'matches'}
... }
... ,hpLineProps={
... 'AGFW Symposium DH_1_bBzg':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'AGFW Symposium DH_2_Q':{'label':'RL Q','color':'lightblue','linestyle':'--','linewidth':2}
... ,'V-Abzweig_1_bBzg':{'label':'VL','color':'tomato' ,'linestyle':'-','linewidth':3}
... ,'R-Abzweig_2_bBzg':{'label':'RL','color':'plum' ,'linestyle':'-','linewidth':3}
... ,'V-Abzweig_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-Abzweig_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... ,'V-EndsTest_1_bBzg':{'label':'VL','color':'lightcoral' ,'linestyle':'-','linewidth':3}
... ,'R-EndsTest_2_bBzg':{'label':'RL','color':'aquamarine' ,'linestyle':'-','linewidth':3}
... ,'V-EndsTest_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-EndsTest_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... #,'V-MatchesTest_1_bBzg':{'label':'VL','color':'orange' ,'linestyle':'-','linewidth':1}
... ,'R-MatchesTest_2_bBzg':{'label':'RL','color':'slateblue' ,'linestyle':'-','linewidth':1}
... ,'V-MatchesTest_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-MatchesTest_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... }
... )
>>> txt=axNfd.set_title('HP')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> sorted(xNodeInfs.keys())
['AGFW Symposium DH_1', 'AGFW Symposium DH_2', 'R-Abzweig_2', 'R-EndsTest_2', 'R-MatchesTest_2', 'V-Abzweig_1', 'V-EndsTest_1', 'V-MatchesTest_1']
>>> xNodeInf=xNodeInfs['R-Abzweig_2']
>>> nl=Rcuts[0]['nl']
>>> nodeInfS=xNodeInf[nl[0]]
>>> nodeInfE=xNodeInf[nl[-1]]
>>> sorted(nodeInfS.keys())
['pDfIdx', 'x', 'xPlot']
>>> dxPlot=nodeInfE['xPlot']-nodeInfS['xPlot']
>>> dxHP=nodeInfE['x']-nodeInfS['x']
>>> dxPlot==dxHP
True
>>> nodeInfE['x']=round(nodeInfE['x'],3)
>>> nodeInfE['xPlot']=round(nodeInfE['xPlot'],3)
>>> {key:value for key,value in nodeInfE.items() if key not in ['pDfIdx']}
{'x': 3285.0, 'xPlot': 20312.428}
>>>
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
if 'NAMECol' not in keys:
kwds['NAMECol']='NAME'
if 'LayerCol' not in keys:
kwds['LayerCol']='Layer'
if 'xCol' not in keys:
kwds['xCol']='x'
if 'hpLines' not in keys:
kwds['hpLines']=['P']
if 'hpLineProps' not in keys:
kwds['hpLineProps']={'NAME_1_P':{'label':'HP NAME Layer 1 P','color':'red','linestyle':'-','linewidth':3}}
if 'hpLineGeoms' not in keys:
kwds['hpLineGeoms']=None
if 'edgeColSequence' not in keys:
kwds['edgeColSequence']=['NAME_i','NAME_k','nextNODE']
if 'yTwinedAxesPosDeltaHPStart' not in keys:
# (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche; default: -0.0125
kwds['yTwinedAxesPosDeltaHPStart']=-0.0125
if 'yTwinedAxesPosDeltaHP' not in keys:
# (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche; default: -0.05
kwds['yTwinedAxesPosDeltaHP']=-0.05
if 'yAxesDetectionPattern' not in keys:
# regExp mit welcher die Achsentypen ermittelt werden
kwds['yAxesDetectionPattern']='([\w ]+)(_)(\d+)$'
logger.debug("{:s}xCol: {:s}.".format(logStr,kwds['xCol']))
logger.debug("{:s}hpLines: {:s}.".format(logStr,str(kwds['hpLines'])))
logger.debug("{:s}hpLineProps: {:s}.".format(logStr,str(kwds['hpLineProps'])))
logger.debug("{:s}hpLineGeoms: {:s}.".format(logStr,str(kwds['hpLineGeoms'])))
logger.debug("{:s}edgeColSequence: {:s}.".format(logStr,str(kwds['edgeColSequence'])))
logger.debug("{:s}yTwinedAxesPosDeltaHPStart: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHPStart'])))
logger.debug("{:s}yTwinedAxesPosDeltaHP: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHP'])))
logger.debug("{:s}yAxesDetectionPattern: {:s}.".format(logStr,str(kwds['yAxesDetectionPattern'])))
# determine the cuts (NAME) and layers
if kwds['NAMECol'] != None and kwds['LayerCol'] != None:
hPs=pDf[[kwds['NAMECol'],kwds['LayerCol']]].drop_duplicates()
elif kwds['NAMECol'] != None:
hPs=pDf[[kwds['NAMECol']]].drop_duplicates()
hPs['Layer']=None
elif kwds['LayerCol'] != None:
hPs=pDf[[kwds['LayerCol']]].drop_duplicates()
hPs['NAME']=None
hPs=hPs[['NAME','Layer']]
else:
hPs=pd.DataFrame(data={'NAME':[None],'Layer':[None]})
#logger.debug("{:s}hPs: {:s}.".format(logStr,hPs.to_string()))
# hPs has 2 columns: NAME and Layer
# determine the y-axis types
hpLineTypesSequence=[col if re.search(kwds['yAxesDetectionPattern'],col)==None else re.search(kwds['yAxesDetectionPattern'],col).group(1) for col in kwds['hpLines']]
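# Example (sketch): with the default pattern '([\w ]+)(_)(\d+)$'
#   hpLines=['bBzg','bBzg_1','Q']  ->  hpLineTypesSequence=['bBzg','bBzg','Q']
# i.e. 'bBzg_1' is plotted on the same y-axis as 'bBzg', while 'Q' gets its own axis.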
# construct the y-axes
yAxes={}
colType1st=hpLineTypesSequence[0]
axHP=kwds['pAx']
axHP.spines["left"].set_position(("axes",kwds['yTwinedAxesPosDeltaHPStart'] ))
axHP.set_ylabel(colType1st)
yAxes[colType1st]=axHP
logger.debug("{:s}colType: {:s} is attached to Axes pcAx .".format(logStr,colType1st))
for idx,colType in enumerate(hpLineTypesSequence[1:]):
if colType not in yAxes:
yPos=kwds['yTwinedAxesPosDeltaHPStart']+kwds['yTwinedAxesPosDeltaHP']*len(yAxes)
logger.debug("{:s}colType: {:s}: new Axes_ yPos: {:1.4f} ...".format(logStr,colType,yPos))
# weitere y-Achse
axHP = axHP.twinx()
axHP.spines["left"].set_position(("axes", yPos))
pltMakePatchSpinesInvisible(axHP)
axHP.spines['left'].set_visible(True)
axHP.yaxis.set_label_position('left')
axHP.yaxis.set_ticks_position('left')
axHP.set_ylabel(colType)
yAxes[colType]=axHP
yLines={}
xNodeInfs={}
for index,row in hPs.iterrows():
# loop over all cuts (NAME) and layers (Layer)
def getKeyBaseAndDf(dfSource,col1Name,col2Name,col1Value,col2Value):
#logger.debug("{:s}getKeyBaseAndDf: dfSource: {:s} ...".format(logStr,dfSource[[col1Name,col2Name,'nextNODE']].to_string()))
# dfSource bzgl. cols filtern
if col1Name != None and col2Name != None:
dfFiltered=dfSource[
(dfSource[col1Name].astype(str)==str(col1Value))
&
(dfSource[col2Name].astype(str)==str(col2Value))
]
keyBase=str(row[col1Name])+'_'+str(col2Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Schnitt: {!s:s} Layer: {!s:s} ...".format(logStr,col1Value,col2Value))
elif col1Name != None:
dfFiltered=dfSource[
(dfSource[col1Name].astype(str)==str(col1Value))
]
keyBase=str(col1Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Schnitt: {!s:s} ...".format(logStr,col1Value))
elif col2Name != None:
dfFiltered=dfSource[
(dfSource[col2Name].astype(str)==str(col2Value))
]
keyBase=str(col2Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Layer: {!s:s} ...".format(logStr,col2Value))
else:
dfFiltered=dfSource
keyBase=''
#logger.debug("{:s}getKeyBaseAndDf: dfFiltered: {:s} ...".format(logStr,dfFiltered[[col1Name,col2Name,'nextNODE']].to_string()))
return keyBase, dfFiltered
# filter pDf down to this cut+layer -> hPpDf
keyBase,hPpDf=getKeyBaseAndDf(pDf
,kwds['NAMECol'] # Spaltenname 1
,kwds['LayerCol'] # Spaltenname 2
,row[kwds['NAMECol']] # Spaltenwert 1
,row[kwds['LayerCol']] # Spaltenwert 2
)
if hPpDf.empty:
logger.info("{:s}Schnitt: {!s:s} Layer: {!s:s}: NICHT in pDf ?! ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']]))
continue
xOffset=0
xOffsetStatic=0
xFactorStatic=1
if kwds['hpLineGeoms'] != None:
if keyBase.rstrip('_') in kwds['hpLineGeoms'].keys():
hpLineGeom=kwds['hpLineGeoms'][keyBase.rstrip('_')]
logger.debug("{:s}Line: {:s}: hpLineGeom: {:s} ...".format(logStr,keyBase.rstrip('_'),str(hpLineGeom)))
if 'offset' in hpLineGeom.keys():
xOffsetStatic=hpLineGeom['offset']
if 'factor' in hpLineGeom.keys():
xFactorStatic=hpLineGeom['factor']
if 'masterHP' in hpLineGeom.keys():
masterHP=hpLineGeom['masterHP']
name=masterHP.split('_')[0]
layer=masterHP.replace(name,'')
layer=layer.replace('_','')
keyBaseMaster,hPpDfMaster=getKeyBaseAndDf(pDf
,kwds['NAMECol'] # Spaltenname 1
,kwds['LayerCol'] # Spaltenname 2
,name # Spaltenwert 1
,layer # Spaltenwert 2
)
if 'masterNode' in hpLineGeom.keys():
masterNode=hpLineGeom['masterNode']
def fGetMatchingRows(row,cols,matchNode):
for col in cols:
if row[col]==matchNode:
return True
return False
# Anker x suchen anhand der Spalten ...
if 'matchAnchorCols' in hpLineGeom.keys():
matchAnchorCols=hpLineGeom['matchAnchorCols']
else:
matchAnchorCols=[kwds['edgeColSequence'][2]]
# AnkerKnoten: Zeilen die in Frage kommen ....
hPpDfMatched=hPpDf[hPpDf.apply(fGetMatchingRows,axis=1,cols=matchAnchorCols,matchNode=masterNode)]
hPpDfMasterMatched=hPpDfMaster[hPpDfMaster.apply(fGetMatchingRows,axis=1,cols=matchAnchorCols,matchNode=masterNode)]
if 'matchType' in hpLineGeom.keys():
matchType=hpLineGeom['matchType']
else:
matchType='starts'
# search the anchor x in the master -------------------------
if 'matchAnchor' in hpLineGeom.keys():
matchAnchor=hpLineGeom['matchAnchor']
else:
matchAnchor='max'
if hPpDfMasterMatched.empty:
logger.info("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s}: in Master in den cols {!s:s} NICHT gefunden. Loesung: xMasterOffset=0.".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,matchAnchorCols))
xMasterOffset=0
else:
if matchAnchor=='min':
hPpDfMasterMatched=hPpDfMaster.loc[hPpDfMasterMatched[kwds['xCol']].idxmin(),:]
else: # matchAnchor=='max'
hPpDfMasterMatched=hPpDfMaster.loc[hPpDfMasterMatched.iloc[::-1][kwds['xCol']].idxmax(),:]
xMasterOffset=hPpDfMasterMatched[kwds['xCol']]
logger.debug("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s} xMasterOffset={:9.3f} ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,xMasterOffset))
# search the anchor x in the HP itself --------------------------
if 'matchAnchorChild' in hpLineGeom.keys():
matchAnchorChild=hpLineGeom['matchAnchorChild']
else:
matchAnchorChild='max'
if hPpDfMatched.empty:
logStrTmp="{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s}: in Child in den cols {!s:s} NICHT gefunden.".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,matchAnchorCols)
if matchType=='matches':
logger.info(logStrTmp+' Loesung: xChildOffset=0.')
else:
if matchType=='ends':
logger.debug(logStrTmp+' Child endet nicht mit masterNode. xChildOffset=0')
else:
logger.debug(logStrTmp+' Child startet evtl. mit masterNode. xChildOffset=0')
xChildOffset=0
else:
if matchAnchorChild=='min':
hPpDfMatched=hPpDf.loc[hPpDfMatched[kwds['xCol']].idxmin(),:]
else: # matchAnchorChild=='max'
hPpDfMatched=hPpDf.loc[hPpDfMatched.iloc[::-1][kwds['xCol']].idxmax(),:]
xChildOffset=hPpDfMatched[kwds['xCol']]
logger.debug("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s} xChildOffset={:9.3f} ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,xChildOffset))
# compute xOffset
if matchType=='starts':
xOffset=xMasterOffset-hPpDf[kwds['xCol']].min() # der Beginn
# matchNode ist Anfang
if hPpDf[kwds['edgeColSequence'][2]].iloc[0] == hPpDf[kwds['edgeColSequence'][1]].iloc[0]:
# nextNode = k
matchNode=hPpDf[kwds['edgeColSequence'][0]].iloc[0]
else:
# nextNode = i
matchNode=hPpDf[kwds['edgeColSequence'][1]].iloc[0]
elif matchType=='ends':
xOffset=xMasterOffset-hPpDf[kwds['xCol']].max() # das Ende
# matchNode ist Ende
if hPpDf[kwds['edgeColSequence'][2]].iloc[-1] == hPpDf[kwds['edgeColSequence'][1]].iloc[-1]:
# nextNode = k
matchNode=hPpDf[kwds['edgeColSequence'][1]].iloc[-1]
else:
# nextNode = i
matchNode=hPpDf[kwds['edgeColSequence'][0]].iloc[-1]
else: # 'matches'
# per Knoten
matchNode=masterNode
xOffset=xMasterOffset-xChildOffset
# xOffset has been computed
# masterNode and matchNode are known
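# Example (sketch): matchType='starts' with xMasterOffset=1500. and a child HP whose x starts
# at 0. yields xOffset=1500., i.e. the child profile is shifted so that its first node lines
# up with masterNode's x position in the master HP.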
logger.debug("{:s}hPpDfMatched: {:s} ...".format(logStr,hPpDfMatched[[kwds['NAMECol'],kwds['LayerCol'],'nextNODE',kwds['xCol'],'NAME_i','NAME_k','OBJTYPE','IptIdx']].to_string()))
logger.debug("{:s}hPpDfMasterMatched: {:s} ...".format(logStr,hPpDfMasterMatched[[kwds['NAMECol'],kwds['LayerCol'],'nextNODE',kwds['xCol'],'NAME_i','NAME_k','OBJTYPE','IptIdx']].to_string()))
else:
logger.debug("{:s}Line: {:s}: keine Geometrieeigenschaften definiert.".format(logStr,keyBase.rstrip('_')))
# determine xNodeInfs
nodeList=hPpDf[kwds['edgeColSequence'][2]].copy()
if hPpDf[kwds['edgeColSequence'][2]].iloc[0] == hPpDf[kwds['edgeColSequence'][1]].iloc[0]:
# nextNode = k
# 1. Knoten i
nodeList.iloc[0]=hPpDf[kwds['edgeColSequence'][0]].iloc[0]
else:
# nextNode = i
# 1. Knoten k
nodeList.iloc[0]=hPpDf[kwds['edgeColSequence'][1]].iloc[0]
nodeList=nodeList.unique()
xNodeInf={}
for idx,node in enumerate(nodeList):
nodeInf={}
if idx==0:
nodeInf[kwds['xCol']]=0
nodeInf['pDfIdx']=hPpDf.index.values[0]
else:
nodeInf[kwds['xCol']]=hPpDf[hPpDf[kwds['edgeColSequence'][2]]==node][kwds['xCol']].max()
nodeInf['pDfIdx']=hPpDf[hPpDf[kwds['edgeColSequence'][2]]==node][kwds['xCol']].idxmax()
nodeInf[kwds['xCol']+'Plot']=nodeInf[kwds['xCol']]*xFactorStatic+xOffset+xOffsetStatic
xNodeInf[node]=nodeInf
xNodeInfs[keyBase.rstrip('_')]=xNodeInf
# loop over all columns (i.e. all y-values to be plotted)
for idx,hpLine in enumerate(kwds['hpLines']):
key=keyBase+hpLine
logger.debug("{:s}Line: {:s} ...".format(logStr,key))
if key in kwds['hpLineProps']:
hpLineProp=kwds['hpLineProps'][key]
if hpLineProp == None:
logger.debug("{:s}Line: {:s} ...: kein Plot.".format(logStr,key))
continue # kein Plot
label=key
color='black'
linestyle='-'
linewidth=3
hpLineType=hpLineTypesSequence[idx]
axHP=yAxes[hpLineType]
lines=axHP.plot(hPpDf[kwds['xCol']]*xFactorStatic+xOffset+xOffsetStatic,hPpDf[hpLine],label=label,color=color,linestyle=linestyle,linewidth=linewidth)
yLines[label]=lines[0]
if key in kwds['hpLineProps']:
hpLineProp=kwds['hpLineProps'][key]
logger.debug("{:s}Line: {:s}: hpLineProp: {:s}.".format(logStr,key,str(hpLineProp)))
for prop,value in hpLineProp.items():
plt.setp(yLines[label],"{:s}".format(prop),value)
else:
logger.debug("{:s}Line: {:s}: keine Eigenschaften definiert.".format(logStr,key))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return yAxes,yLines,xNodeInfs
@classmethod
def pltTC(cls,pDf,tcLines,**kwds):
"""
Plots a Time Curve Diagram.
Args:
    DATA:
        pDf: dataFrame
            index: times
            cols: values (with mx.df colnames)
        tcLines: dct
            defining the curves and their layout:
                Key:
                    OBJTYPE~NAME1~NAME2~ATTRTYPE is used as the key, i.e. OBJTYPE_PK is not part of the key
                    * tcLines - Example - = {
                        'KNOT~NAME1~~PH':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
                        }
                definition of the y-axis types (y-Axes):
                    * are derived from the different ATTRTYPEs in tcLines
                    * ATTRTYPE - e.g. 'PH' - is used as the identifier of the axis type
                    * the axes are created in the order in which they occur in tcLines
                    * yTwinedAxesPosDeltaHPStart: (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
                    * yTwinedAxesPosDeltaHP: (usually negative) additional offset of each further y-axis from the plot area; default: -0.05
                attributes:
                    * all valid line properties (e.g. color, linestyle, linewidth, drawstyle)
                    * plus
                    * forceYType
                    * offset
                    * factor
                    * timeStart
                    * timeEnd
                    * legendInfosFmt
                    * label
        vLines: dct (passed via kwds) defining vertical time-marker lines; key: label, value: dct with 'time' and line properties (see the doctest below)
    AXES:
        pAx: Axes to be plotted on; if not specified: gca() is used
        x-axis formatting:
            majLocator - examples:
                mdates.MinuteLocator(interval=5)
                mdates.MinuteLocator(byminute=[0,5,10,15,20,25,30,35,40,45,50,55])
            majFormatter - examples:
                mdates.DateFormatter('%d.%m.%y: %H:%M')
            xTicksLabelsOff: if True, no x-axis tick labels
Return:
    yAxes: dct with AXES; key=y-axis type
    yLines: dct with Line2Ds; key=Index from tcLines
    vLines: dct with Line2Ds; key=Index from vLines
    yLinesLegendLabels: dct with legend labels; key=Index from tcLines
    (a small sketch on building tcLines programmatically follows after this Return section)
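    Example (illustrative sketch, not a doctest): building tcLines programmatically for all pump speed curves.
        Mx.getSir3sIDoPKFromSir3sID and the column layout of mx.df are taken from the doctest below;
        the regular expression is an assumption about the shortened key format.

            import re
            tcLines = {}
            for col in mx.df.columns:
                try:
                    key = Mx.getSir3sIDoPKFromSir3sID(col)   # 'OBJTYPE~NAME1~NAME2~ATTRTYPE'
                except Exception:
                    continue
                if re.match(r'^PUMP~.*~N$', key):
                    tcLines[key] = {'label': key, 'linestyle': '--', 'legendInfosFmt': '{:4.0f}'}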
>>> # -q -m 0 -s pltTC -y no -z no -w DHNetwork
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import matplotlib.dates as mdates
>>> import math
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> # xm=xms['DHNetwork']
>>> mx=mxs['DHNetwork']
>>> sir3sID=mx.getSir3sIDFromSir3sIDoPK('ALLG~~~LINEPACKGEOM') # 'ALLG~~~5151766074450398225~LINEPACKGEOM'
>>> # mx.df[sir3sID].describe()
>>> # mx.df[sir3sID].iloc[0]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axTC = fig.add_subplot(gs[0])
>>> yAxes,yLines,vLines,yLinesLegendLabels=Rm.Rm.pltTC(mx.df
... ,tcLines={
... 'ALLG~~~LINEPACKRATE':{'label':'Linepackrate','color':'red' ,'linestyle':'-','linewidth':3,'drawstyle':'steps','factor':10}
... ,'ALLG~~~LINEPACKGEOM':{'label':'Linepackgeometrie','color':'b' ,'linestyle':'-','linewidth':3,'offset':-mx.df[sir3sID].iloc[0]
... ,'timeStart':mx.df.index[0]+pd.Timedelta('10 Minutes')
... ,'timeEnd':mx.df.index[-1]-pd.Timedelta('10 Minutes')}
... ,'RSLW~wNA~~XA':{'label':'RSLW~wNA~~XA','color':'lime','forceYType':'N'}
... ,'PUMP~R-A-SS~R-A-DS~N':{'label':'PUMP~R-A-SS~R-A-DS~N','color':'aquamarine','linestyle':'--','legendInfosFmt':'{:4.0f}'}
... }
... ,pAx=axTC
... ,vLines={
... 'a vLine Label':{'time': mx.df.index[0] + pd.Timedelta('10 Minutes')
... ,'color':'dimgrey'
... ,'linestyle':'--'
... ,'linewidth':5.}
... }
... ,majLocator=mdates.MinuteLocator(byminute=[0,5,10,15,20,25,30,35,40,45,50,55])
... ,majFormatter=mdates.DateFormatter('%d.%m.%y: %H:%M')
... #,xTicksLabelsOff=True
... )
>>> sorted(yAxes.keys())
['LINEPACKGEOM', 'LINEPACKRATE', 'N']
>>> sorted(yLines.keys())
['ALLG~~~LINEPACKGEOM', 'ALLG~~~LINEPACKRATE', 'PUMP~R-A-SS~R-A-DS~N', 'RSLW~wNA~~XA']
>>> sorted(vLines.keys())
['a vLine Label']
>>> gs.tight_layout(fig)
>>> plt.show()
>>>
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
if 'yTwinedAxesPosDeltaHPStart' not in keys:
# (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche; default: -0.0125
kwds['yTwinedAxesPosDeltaHPStart']=-0.0125
if 'yTwinedAxesPosDeltaHP' not in keys:
# (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche; default: -0.05
kwds['yTwinedAxesPosDeltaHP']=-0.05
logger.debug("{:s}tcLines: {:s}.".format(logStr,str(tcLines)))
logger.debug("{:s}yTwinedAxesPosDeltaHPStart: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHPStart'])))
logger.debug("{:s}yTwinedAxesPosDeltaHP: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHP'])))
if 'lLoc' not in keys:
kwds['lLoc']='best'
if 'lFramealpha' not in keys:
kwds['lFramealpha']=matplotlib.rcParams["legend.framealpha"]
if 'lFacecolor' not in keys:
kwds['lFacecolor']='white'
if 'lOff' not in keys:
kwds['lOff']=False
yAxes=yLines=vLines=None
# for each column determine the key without OBJTYPE_PK == the key used in tcLines
colFromTcKey={}
for col in pDf.columns.tolist():
if pd.isna(col):
continue
try:
colNew=Mx.getSir3sIDoPKFromSir3sID(col)
colFromTcKey[colNew]=col # merken welche Originalspalte zu dem tcLines Schluessel gehoert
logger.debug("{:s}Zu Spalte ohne Schlüssel: {:s} gehört Spalte: {:s} in pDf.".format(logStr,colNew,col))
except:
logger.debug("{:s}keine Zuordnung gefunden (z.B. kein Mx.getSir3sIDoPKFromSir3sID-match) fuer pDf-Spalte: {:s}. Spaltenname(n) keine vollständigen SIR 3S Schluessel (mehr)?!".format(logStr,col))
# y-Achsen-Typen ermitteln
yTypesSequence=[]
for key,props in tcLines.items():
try:
mo=re.match(Mx.reSir3sIDoPKcompiled,key)
yType=mo.group('ATTRTYPE')
if 'forceYType' in props.keys():
yType=props['forceYType']
if yType not in yTypesSequence:
yTypesSequence.append(yType)
logger.debug("{:s}neuer y-Achsentyp: {:s}.".format(logStr,yType))
except:
logger.debug("{:s}kein Achsentyp ermittelt (z.B. kein Mx.reSir3sIDoPKcompiled-match) fuer: {:s}. tcLine(s) Schluessel kein SIR 3S Schluessel oPK?!".format(logStr,key))
# y-Achsen konstruieren
yAxes={}
colType1st=yTypesSequence[0]
axTC=kwds['pAx']
axTC.spines["left"].set_position(("axes",kwds['yTwinedAxesPosDeltaHPStart'] ))
axTC.set_ylabel(colType1st)
yAxes[colType1st]=axTC
logger.debug("{:s}colType: {:s}: is attached to 1st Axes.".format(logStr,colType1st))
for idx,colType in enumerate(yTypesSequence[1:]):
# weitere y-Achse
yPos=kwds['yTwinedAxesPosDeltaHPStart']+kwds['yTwinedAxesPosDeltaHP']*len(yAxes)
logger.debug("{:s}colType: {:s}: is attached to a new Axes: yPos: {:1.4f} ...".format(logStr,colType,yPos))
axTC = axTC.twinx()
axTC.spines["left"].set_position(("axes", yPos))
pltMakePatchSpinesInvisible(axTC)
axTC.spines['left'].set_visible(True)
axTC.yaxis.set_label_position('left')
axTC.yaxis.set_ticks_position('left')
axTC.set_ylabel(colType)
yAxes[colType]=axTC
# ueber alle definierten Kurven
# max. Länge label vor Infos ermitteln
labels=[]
infos=[]
for key,props in tcLines.items():
label=key
if 'label' in props:
label=props['label']
labels.append(label)
if 'legendInfosFmt' in props:
legendInfosFmt=props['legendInfosFmt']
else:
legendInfosFmt='{:6.2f}'
if key not in colFromTcKey.keys():
logger.debug("{:s}Line: {:s}: es konnte keine Spalte in pDf ermittelt werden. Spaltenname(n) kein SIR 3S Schluessel?! Kein Plot.".format(logStr,key))
continue
else:
col=colFromTcKey[key]
logger.debug("{:s}Line: {:s}: Spalte in pDf: {:s}.".format(logStr,key,col))
if 'timeStart' in props:
timeStart=props['timeStart']
else:
timeStart=pDf.index[0]
if 'timeEnd' in props:
timeEnd=props['timeEnd']
else:
timeEnd=pDf.index[-1]
plotDf=pDf.loc[timeStart:timeEnd,:]
infos.append(legendInfosFmt.format(plotDf[col].min()))
infos.append(legendInfosFmt.format(plotDf[col].max()))
labelsLength=[len(label) for label in labels]
labelsLengthMax=max(labelsLength)
infosLength=[len(info) for info in infos]
infosLengthMax=max(infosLength)
# zeichnen
yLines={}
yLinesLegendLabels={}
# ueber alle definierten Kurven
for key,props in tcLines.items():
if key not in colFromTcKey.keys():
logger.debug("{:s}Line: {:s}: es konnte keine Spalte in pDf ermittelt werden. Spaltenname(n) kein SIR 3S Schluessel?! Kein Plot.".format(logStr,key))
continue
else:
col=colFromTcKey[key]
mo=re.match(Mx.reSir3sIDoPKcompiled,key)
yType=mo.group('ATTRTYPE')
if 'forceYType' in props.keys():
yType=props['forceYType']
axTC=yAxes[yType]
logger.debug("{:s}Line: {:s} on Axes {:s} ...".format(logStr,key,yType))
label=key
color='black'
linestyle='-'
linewidth=3
if 'offset' in props:
offset=props['offset']
else:
offset=0.
if 'factor' in props:
factor=props['factor']
else:
factor=1.
if 'timeStart' in props:
timeStart=props['timeStart']
else:
timeStart=pDf.index[0]
if 'timeEnd' in props:
timeEnd=props['timeEnd']
else:
timeEnd=pDf.index[-1]
if 'legendInfosFmt' in props:
legendInfosFmt=props['legendInfosFmt']
else:
legendInfosFmt='{:6.2f}'
plotDf=pDf.loc[timeStart:timeEnd,:]
lines=axTC.plot(plotDf.index.values,plotDf[col]*factor+offset,label=label,color=color,linestyle=linestyle,linewidth=linewidth)
yLines[key]=lines[0]
if 'label' in props:
label=props['label']
else:
label=label
legendLabelFormat="Anf.: {:s} Ende: {:s} Min: {:s} Max: {:s}"#.format(*4*[legendInfosFmt])
legendLabelFormat="{:s} "+legendLabelFormat
legendInfos=[plotDf[col].iloc[0],plotDf[col].iloc[-1],plotDf[col].min(),plotDf[col].max()]
legendInfos=[factor*legendInfo+offset for legendInfo in legendInfos]
legendLabel=legendLabelFormat.format(label.ljust(labelsLengthMax,' '),
*["{:s}".format(legendInfosFmt).format(legendInfo).rjust(infosLengthMax,' ') for legendInfo in legendInfos]
)
yLinesLegendLabels[key]=legendLabel
logger.debug("{:s}legendLabel: {:s}.".format(logStr,legendLabel))
for prop,value in props.items():
if prop not in ['forceYType','offset','factor','timeStart','timeEnd','legendInfosFmt']:
plt.setp(yLines[key],"{:s}".format(prop),value)
# x-Achse
# ueber alle Axes
for key,ax in yAxes.items():
ax.set_xlim(pDf.index[0],pDf.index[-1])
if 'majLocator' in kwds.keys():
ax.xaxis.set_major_locator(kwds['majLocator'])
if 'majFormatter' in kwds.keys():
ax.xaxis.set_major_formatter(kwds['majFormatter'])
plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical',ha='center')
ax.xaxis.grid()
# Beschriftung ausschalten
if 'xTicksLabelsOff' in kwds.keys(): # xTicksOff
if kwds['xTicksLabelsOff']:
logger.debug("{:s}Achse: {:s}: x-Achse Labels aus.".format(logStr,key))
#for tic in ax.xaxis.get_major_ticks():
# tic.tick1On = tic.tick2On = False
ax.set_xticklabels([])
# vLines
# ueber alle definierten vLines
vLines={}
if 'vLines' in kwds.keys():
for key,props in kwds['vLines'].items():
if 'time' in props.keys():
logger.debug("{:s}vLine: {:s} ....".format(logStr,key))
vLine=ax.axvline(x=props['time'], ymin=0, ymax=1, label=key)
vLines[key]=vLine
for prop,value in props.items():
if prop not in ['time']:
plt.setp(vLine,"{:s}".format(prop),value)
else:
logger.debug("{:s}vLine: {:s}: time nicht definiert.".format(logStr,key))
# Legend
import matplotlib.font_manager as font_manager
font = font_manager.FontProperties(family='monospace'
#weight='bold',
#style='normal',
#size=16
)
if not kwds['lOff']:
l=kwds['pAx'].legend(
tuple([yLines[yline] for yline in yLines])
,
tuple([yLinesLegendLabels[yLine] for yLine in yLinesLegendLabels])
,loc=kwds['lLoc']
,framealpha=kwds['lFramealpha']
,facecolor=kwds['lFacecolor']
,prop=font
)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return yAxes,yLines,vLines,yLinesLegendLabels
def __init__(self,xm=None,mx=None):
"""
Args:
xm: Xm.Xm Object
mx: Mx.Mx Object
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
self.xm=xm
self.mx=mx
try:
vNRCV_Mx1=self.xm.dataFrames['vNRCV_Mx1'] # d.h. Sachdaten bereits annotiert mit MX1-Wissen
except:
logger.debug("{:s}{:s} not in {:s}. Sachdaten mit MX1-Wissen zu annotieren wird nachgeholt ...".format(logStr,'vNRCV_Mx1','dataFrames'))
self.xm.MxSync(mx=self.mx)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetDHUS(self,**kwds):
"""Plot: Net: DistrictHeatingUnderSupply.
Args (optional):
TIMEs (als TIMEDELTA zu Szenariumbeginn):
* timeDeltaToRef: Reference Scenariotime (for MeasureInRefPerc-Calculations) (default: pd.to_timedelta('0 seconds'))
* timeDeltaToT: Scenariotime (default: pd.to_timedelta('0 seconds'))
FWVB
* pFWVBFilterFunction: Filterfunction to be applied to FWVB to determine the FWVB to be plotted
* default: lambda df: (df.CONT_ID.astype(int).isin([1001])) & (df.W0LFK>0)
* CONT_IDisIn: [1001]
* um zu vermeiden, dass FWVB aus Bloecken gezeichnet werden (unwahrscheinlich, dass es solche gibt)
* W0LFK>0:
* um zu vermeiden, dass versucht wird, FWVB mit der Soll-Leistung 0 zu zeichnen (pFWVBAttribute default is 'W0LFK')
FWVB Attribute (Size, z-Order) - from vFWVB
* pFWVBAttribute: columnName (default: 'W0LFK')
* the column must be able to be converted to a float
* the conversion is done before FilterFunction
* see ApplyFunction and NaNValue for conversion details:
* pFWVBAttributeApplyFunction: Function to be applied to column pFWVBAttribute
* default: lambda x: pd.to_numeric(x,errors='coerce')
* pFWVBAttributeApplyFunctionNaNValue: Value for NaN-Values produced by pFWVBAttributeApplyFunction if any
* default: 0
* .fillna(pFWVBAttributeApplyFunctionNaNValue).astype(float) is called after ApplyFunction
* pFWVBAttributeAsc: z-Order (default: False d.h. "kleine auf große")
* pFWVBAttributeRefSize: scatter Sy-Area in pts^2 of for RefSizeValue (default: 10**2)
* corresponding RefSizeValue is Attribute.std() or Attribute.mean() if Attribute.std() is < 1
FWVB (plot only large (small, medium) FWVB ...)
* quantil_pFWVBAttributeHigh <= (default: 1.)
* quantil_pFWVBAttributeLow >= (default: .0)
* default: all FWVB are plotted
* note that Attribute >0 is a precondition
FWVB Measure (Color) - from mx
* pFWVBMeasure (default: 'FWVB~*~*~*~W')
* float() must be possible
* pFWVBMeasureInRefPerc (default: True d.h. Measure wird verarbeitet in Prozent T zu Ref)
* 0-1
* if refValue is 0 then the refPerc result is set to 1
* pFWVBMeasureAlpha/Colormap/Clip
* 3Classes
* pFWVBMeasure3Classes (default: False)
* False:
* Measure wird nicht in 3 Klassen dargestellt
* die Belegung von MCategory gemaess FixedLimitsHigh/Low erfolgt dennoch
* CatTexts (werden verwendet wenn 3Classes Wahr gesetzt ist)
* für CBLegend (3Classes) als _zusätzliche Beschriftung rechts
* als Texte für die Spalte MCategory in return pFWVB
* pMCatTopText
* pMCatMidText
* pMCatBotText
* CatAttribs (werden verwendet wenn 3Classes Wahr gesetzt ist)
* für die Knotendarstellung
* pMCatTopAlpha/Color/Clip
* pMCatMidAlpha/Colormap/Clip
* pMCatBotAlpha/Color/Clip
* CBFixedLimits
* pFWVBMeasureCBFixedLimits (default: False d.h. Farbskala nach vorh. min./max. Wert)
* wird Wahr gesetzt sein, wenn 3Classes Wahr gesetzt ist
* damit die mittlere Farbskala den Klassengrenzen "gehorcht"
* pFWVBMeasureCBFixedLimitLow (default: .10)
* pFWVBMeasureCBFixedLimitHigh (default: .95)
CB
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
* CBLabelPad (default: -50)
* CBTicklabelsHPad (default: 0.)
* CBAspect: ratio of long to short dimension (default: 10.)
* CBShrink: fraction by which to shrink the colorbar (default: .3)
* CBAnchorHorizontal: horizontaler Fußpunkt der colorbar in Plot-% (default: 0.)
* CBAnchorVertical: vertikaler Fußpunkt der colorbar in Plot-% (default: 0.2)
CBLegend (3Classes) - Parameterization of the representative Symbols
* CBLe3cTopVPad (default: 1+1*1/4)
* CBLe3cMidVPad (default: .5)
* CBLe3cBotVPad (default: 0-1*1/4)
* "1" is the height of the Colorbar
* the VPads (the vertical Sy-Positions) are defined in cax.transAxes Coordinates
* cax is the Colorbar Axes
* CBLe3cSySize=10**2 (Sy-Area in pts^2)
* CBLe3cSyType='o'
ROHR
* pROHRFilterFunction: Filterfunction to be applied to PIPEs to determine the PIPEs to be plotted
* default: lambda df: (df.KVR.astype(int).isin([2])) & (df.CONT_ID.astype(int).isin([1001])) & (df.DI.astype(float)>0)
* KVRisIn: [2]
* 1: supply-line
* 2: return-line
* CONT_IDisIn: [1001]
* um zu vermeiden, dass Rohre aus Bloecken gezeichnet werden (deren Koordinaten nicht zu den Koordinaten von Rohren aus dem Ansichtsblock passen)
* DI>0:
* um zu vermeiden, dass versucht wird, Rohre mit dem Innendurchmesser 0 zu zeichnen (pROHRAttribute default is 'DI')
ROHR (PIPE-Line: Size and Color, z-Order) - from vROHR
* pROHRAttribute: columnName (default: 'DI')
* the column must be able to be converted to a float
* the conversion is done before FilterFunction
* see ApplyFunction and NaNValue for conversion details:
* pROHRAttributeApplyFunction: Function to be applied to column pROHRAttribute
* default: lambda x: pd.to_numeric(x,errors='coerce')
* pROHRAttributeApplyFunctionNaNValue: Value for NaN-Values produced by pROHRAttributeApplyFunction if any
* default: 0
* .fillna(pROHRAttributeApplyFunctionNaNValue).astype(float) is called after ApplyFunction
* pROHRAttributeAsc: z-Order (default: False d.h. "kleine auf grosse")
* pROHRAttributeLs (default: '-')
* pROHRAttributeRefSize: plot linewidth in pts for RefSizeValue (default: 1.0)
* pROHRAttributeSizeMin (default: None): if set: use pROHRAttributeSizeMin-Value as Attribute for LineSize if Attribute < pROHRAttributeSizeMin
* corresponding RefSizeValue is Attribute.std() or Attribute.mean() if Attribute.std() is < 1
* pROHRAttributeColorMap (default: plt.cm.binary)
* pROHRAttributeColorMapUsageStart (default: 1./3; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe haetten, bekommen die Farbe von UsageStart
ROHR (plot only large (small, medium) pipes ...)
* quantil_pROHRAttributeHigh <= (default: 1.)
* quantil_pROHRAttributeLow >= (default: .75)
* default: only the largest 25% are plotted
* note that Attribute >0 is a precondition
ROHR (PIPE-Marker: Size and Color) - from mx
* pROHRMeasure columnName (default: 'ROHR~*~*~*~QMAV')
* pROHRMeasureApplyFunction: Function to be applied to column pROHRMeasure (default: lambda x: math.fabs(x))
* pROHRMeasureMarker (default: '.')
* pROHRMeasureRefSize: plot markersize for RefSizeValue in pts (default: 1.0)
* pROHRMeasureSizeMin (default: None): if set: use pROHRMeasureSizeMin-Value as Measure for MarkerSize if Measure < pROHRMeasureSizeMin
* corresponding RefSizeValue is Measure.std() or Measure.mean() if Measure.std() is < 1
* if pROHRMeasureRefSize is None: plot markersize will be plot linewidth
* pROHRMeasureColorMap (default: plt.cm.cool)
* pROHRMeasureColorMapUsageStart (default: 0.; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
NRCVs - NumeRiCal Values to be displayed
* pFIGNrcv: List of Sir3sID RegExps to be displayed (i.e. ['KNOT~PKON-Knoten~\S*~\S+~QM']) default: None
the 1st Match is used if a RegExp matches more than 1 Channel
further Examples for RegExps (and corresponding Texts):
* WBLZ~WärmeblnzGes~\S*~\S+~WES (Generation)
* WBLZ~WärmeblnzGes~\S*~\S+~WVB (Load)
* WBLZ~WärmeblnzGes~\S*~\S+~WVERL (Loss)
WBLZ~[\S ]+~\S*~\S+~\S+: Example for a RegExp matching all Channels with OBJTYPE WBLZ
* pFIGNrcvTxt: corresponding (same length required!) List of Texts (i.e. ['Kontrolle DH']) default: None
* pFIGNrcvFmt (i.e. '{:12s}: {:8.2f} {:6s}')
* Text (from pFIGNrcvTxt)
* Value
* UNIT (determined from Channel-Data)
* pFIGNrcvPercFmt (i.e. ' {:6.1f}%')
* ValueInRefPercent
* if refValue==0: 100%
* pFIGNrcvXStart (.5 default)
* pFIGNrcvYStart (.5 default)
Category - User Heat Balances to be displayed
* pFWVBGCategory: List of Heat Balances to be displayed (i.e. ['BLNZ1u5u7']) default: None
* pFWVBGCategoryUnit: Unit of all these Balances (default: '[kW]'])
* pFWVBGCategoryXStart (.1 default)
* pFWVBGCategoryYStart (.9 default)
* pFWVBGCategoryCatFmt (i.e. '{:12s}: {:6.1f} {:4s}')
* Category NAME
* Category Load
* pFWVBGCategoryUnit
* pFWVBGCategoryPercFmt (i.e. ' {:6.1f}%')
* Last Ist/Soll
* pFWVBGCategory3cFmt (i.e. ' {:5d}/{:5d}/{:5d}')
* NOfTops
* NOfMids
* NOfBots
VICs - VeryImportantCustomers whose Values to be displayed
* pVICsDf: DataFrame with VeryImportantCustomers (Text & Specification)
columns expected:
* Kundenname (i.e. 'VIC1') - Text
* Knotenname (i.e. 'V-K007') - Specification by Supply-Node
i.e.: pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']})
* pVICsPercFmt (i.e. '{:12s}: {:6.1f}%')
* Kundenname
* Load in Percent to Reference
* pVICsFmt (i.e. '{:12s}: {:6.1f} {:6s}')
* Kundenname
* Load
* pFWVBGCategoryUnit
* pVICsXStart (.5 default)
* pVICsYStart (.1 default)
Figure:
* pltTitle: title [not suptitle] (default: 'pltNetDHUS')
* figFrameon: figure frame (background): displayed or invisible (default: True)
* figEdgecolor: edge color of the Figure rectangle (default: 'black')
* figFacecolor: face color of the Figure rectangle (default: 'white')
Returns:
pFWVB
* columns changed (compared to vFWVB):
* pFWVBAttribute (wg. z.B. pFWVBAttributeApplyFunction und .astype(float))
* columns added (compared to vFWVB):
* Measure (in % zu Ref wenn pFWVBMeasureInRefPer=True)
* MeasureRef (Wert von Measure im Referenzzustand)
* MeasureOrig (Wert von Measure)
* MCategory: str (Kategorisierung von Measure mit FixedLimitHigh/Low-Werten):
* TopText or
* MidText or
* BotText
* GCategory: list (non-empty only if req. GCategories are a subset of the available Categories and object belongs to a req. Category)
* VIC (filled with Kundenname from pVICsDf)
* rows (compared to vFWVB):
* pFWVB enthaelt dieselben Objekte wie vFWVB
* aber: die geplotteten Objekte sind ggf. nur eine Teilmenge (wg. z.B. pFWVBFilterFunction)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keysDefined=['CBAnchorHorizontal', 'CBAnchorVertical', 'CBAspect', 'CBFraction', 'CBHpad', 'CBLabelPad'
,'CBLe3cBotVPad', 'CBLe3cMidVPad', 'CBLe3cSySize', 'CBLe3cSyType', 'CBLe3cTopVPad'
,'CBShrink', 'CBTicklabelsHPad'
,'figEdgecolor', 'figFacecolor', 'figFrameon'
,'pFIGNrcv','pFIGNrcvFmt', 'pFIGNrcvPercFmt','pFIGNrcvTxt', 'pFIGNrcvXStart', 'pFIGNrcvYStart'
,'pFWVBFilterFunction'
,'pFWVBAttribute'
,'pFWVBAttributeApplyFunction','pFWVBAttributeApplyFunctionNaNValue'
,'pFWVBAttributeAsc'
,'pFWVBAttributeRefSize'
,'pFWVBGCategory', 'pFWVBGCategoryUnit','pFWVBGCategory3cFmt','pFWVBGCategoryCatFmt', 'pFWVBGCategoryPercFmt', 'pFWVBGCategoryXStart', 'pFWVBGCategoryYStart'
,'pFWVBMeasure', 'pFWVBMeasure3Classes', 'pFWVBMeasureAlpha', 'pFWVBMeasureCBFixedLimitHigh', 'pFWVBMeasureCBFixedLimitLow', 'pFWVBMeasureCBFixedLimits', 'pFWVBMeasureClip', 'pFWVBMeasureColorMap', 'pFWVBMeasureInRefPerc'
,'pMCatBotAlpha', 'pMCatBotClip', 'pMCatBotColor', 'pMCatBotText', 'pMCatMidAlpha', 'pMCatMidClip', 'pMCatMidColorMap', 'pMCatMidText', 'pMCatTopAlpha', 'pMCatTopClip', 'pMCatTopColor', 'pMCatTopText'
,'pROHRFilterFunction'
,'pROHRAttribute'
,'pROHRAttributeApplyFunction','pROHRAttributeApplyFunctionNaNValue'
,'pROHRAttributeAsc', 'pROHRAttributeColorMap', 'pROHRAttributeColorMapUsageStart', 'pROHRAttributeLs', 'pROHRAttributeRefSize','pROHRAttributeSizeMin'
,'pROHRMeasure','pROHRMeasureApplyFunction'
,'pROHRMeasureColorMap', 'pROHRMeasureColorMapUsageStart', 'pROHRMeasureMarker', 'pROHRMeasureRefSize','pROHRMeasureSizeMin'
,'pVICsDf','pVICsPercFmt','pVICsFmt','pVICsXStart', 'pVICsYStart'
,'pltTitle'
,'quantil_pFWVBAttributeHigh', 'quantil_pFWVBAttributeLow'
,'quantil_pROHRAttributeHigh', 'quantil_pROHRAttributeLow'
,'timeDeltaToRef', 'timeDeltaToT']
keys=sorted(kwds.keys())
for key in keys:
if key in keysDefined:
value=kwds[key]
logger.debug("{0:s}kwd {1:s}: {2:s}".format(logStr,key,str(value)))
else:
logger.warning("{0:s}kwd {1:s} NOT defined!".format(logStr,key))
del kwds[key]
# TIMEs
if 'timeDeltaToRef' not in keys:
kwds['timeDeltaToRef']=pd.to_timedelta('0 seconds')
if 'timeDeltaToT' not in keys:
kwds['timeDeltaToT']=pd.to_timedelta('0 seconds')
# FWVB
if 'pFWVBFilterFunction' not in keys:
kwds['pFWVBFilterFunction']=lambda df: (df.CONT_ID.astype(int).isin([1001])) & (df.W0LFK.astype(float)>0)
# FWVB Attribute (Size)
if 'pFWVBAttribute' not in keys:
kwds['pFWVBAttribute']='W0LFK'
if 'pFWVBAttributeApplyFunction' not in keys:
kwds['pFWVBAttributeApplyFunction']=lambda x: pd.to_numeric(x,errors='coerce') # applied via .apply(kwds['pFWVBAttributeApplyFunction'])
if 'pFWVBAttributeApplyFunctionNaNValue' not in keys:
kwds['pFWVBAttributeApplyFunctionNaNValue']=0 # .fillna(kwds['pFWVBAttributeApplyFunctionNaNValue']).astype(float)
if 'pFWVBAttributeAsc' not in keys:
kwds['pFWVBAttributeAsc']=False
if 'pFWVBAttributeRefSize' not in keys:
kwds['pFWVBAttributeRefSize']=10**2
if 'quantil_pFWVBAttributeHigh' not in keys:
kwds['quantil_pFWVBAttributeHigh']=1.
if 'quantil_pFWVBAttributeLow' not in keys:
kwds['quantil_pFWVBAttributeLow']=.0
# FWVB Measure (Color)
if 'pFWVBMeasure' not in keys:
kwds['pFWVBMeasure']='FWVB~*~*~*~W'
if 'pFWVBMeasureInRefPerc' not in keys:
kwds['pFWVBMeasureInRefPerc']=True
if 'pFWVBMeasureAlpha' not in keys:
kwds['pFWVBMeasureAlpha']=0.9
if 'pFWVBMeasureColorMap' not in keys:
kwds['pFWVBMeasureColorMap']=plt.cm.autumn
if 'pFWVBMeasureClip' not in keys:
kwds['pFWVBMeasureClip']=False
# 3Classes
if 'pFWVBMeasure3Classes' not in keys:
kwds['pFWVBMeasure3Classes']=False
# CatTexts (werden verwendet wenn 3Classes Wahr gesetzt ist)
if 'pMCatTopText' not in keys:
kwds['pMCatTopText']='Top'
if 'pMCatMidText' not in keys:
kwds['pMCatMidText']='Middle'
if 'pMCatBotText' not in keys:
kwds['pMCatBotText']='Bottom'
# CatAttribs (werden verwendet wenn 3Classes Wahr gesetzt ist)
if 'pMCatTopAlpha' not in keys:
kwds['pMCatTopAlpha']=0.9
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatTopClip' not in keys:
kwds['pMCatTopClip']=False
if 'pMCatMidAlpha' not in keys:
kwds['pMCatMidAlpha']=0.9
if 'pMCatMidColorMap' not in keys:
kwds['pMCatMidColorMap']=plt.cm.autumn
if 'pMCatMidClip' not in keys:
kwds['pMCatMidClip']=False
if 'pMCatBotAlpha' not in keys:
kwds['pMCatBotAlpha']=0.9
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
if 'pMCatBotClip' not in keys:
kwds['pMCatBotClip']=False
# CBFixedLimits
if 'pFWVBMeasureCBFixedLimits' not in keys:
kwds['pFWVBMeasureCBFixedLimits']=False
if 'pFWVBMeasureCBFixedLimitLow' not in keys:
kwds['pFWVBMeasureCBFixedLimitLow']=.10
if 'pFWVBMeasureCBFixedLimitHigh' not in keys:
kwds['pFWVBMeasureCBFixedLimitHigh']=.95
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
if 'CBLabelPad' not in keys:
kwds['CBLabelPad']=-50
if 'CBTicklabelsHPad' not in keys:
kwds['CBTicklabelsHPad']=0
if 'CBAspect' not in keys:
kwds['CBAspect']=10.
if 'CBShrink' not in keys:
kwds['CBShrink']=0.3
if 'CBAnchorHorizontal' not in keys:
kwds['CBAnchorHorizontal']=0.
if 'CBAnchorVertical' not in keys:
kwds['CBAnchorVertical']=0.2
# CBLegend (3Classes)
if 'CBLe3cTopVPad' not in keys:
kwds['CBLe3cTopVPad']=1+1*1/4
if 'CBLe3cMidVPad' not in keys:
kwds['CBLe3cMidVPad']=.5
if 'CBLe3cBotVPad' not in keys:
kwds['CBLe3cBotVPad']=0-1*1/4
if 'CBLe3cSySize' not in keys:
kwds['CBLe3cSySize']=10**2
if 'CBLe3cSyType' not in keys:
kwds['CBLe3cSyType']='o'
# ROHR
if 'pROHRFilterFunction' not in keys:
kwds['pROHRFilterFunction']=lambda df: (df.KVR.astype(int).isin([2])) & (df.CONT_ID.astype(int).isin([1001])) & (df.DI.astype(float)>0)
# pROHR (PIPE-Line: Size and Color)
if 'pROHRAttribute' not in keys:
kwds['pROHRAttribute']='DI'
if 'pROHRAttributeApplyFunction' not in keys:
kwds['pROHRAttributeApplyFunction']=lambda x: pd.to_numeric(x,errors='coerce') # applied via .apply(kwds['pROHRAttributeApplyFunction'])
if 'pROHRAttributeApplyFunctionNaNValue' not in keys:
kwds['pROHRAttributeApplyFunctionNaNValue']=0 # .fillna(kwds['pROHRAttributeApplyFunctionNaNValue']).astype(float)
if 'pROHRAttributeAsc' not in keys:
kwds['pROHRAttributeAsc']=False
if 'pROHRAttributeLs' not in keys:
kwds['pROHRAttributeLs']='-'
if 'pROHRAttributeRefSize' not in keys:
kwds['pROHRAttributeRefSize']=1.
if 'pROHRAttributeSizeMin' not in keys:
kwds['pROHRAttributeSizeMin']=None
if 'pROHRAttributeColorMap' not in keys:
kwds['pROHRAttributeColorMap']=plt.cm.binary
if 'pROHRAttributeColorMapUsageStart' not in keys:
kwds['pROHRAttributeColorMapUsageStart']=1./3.
if 'quantil_pROHRAttributeHigh' not in keys:
kwds['quantil_pROHRAttributeHigh']=1.
if 'quantil_pROHRAttributeLow' not in keys:
kwds['quantil_pROHRAttributeLow']=.75
# pROHR (PIPE-Marker: Size and Color)
if 'pROHRMeasure' not in keys:
kwds['pROHRMeasure']='ROHR~*~*~*~QMAV'
if 'pROHRMeasureApplyFunction' not in keys:
kwds['pROHRMeasureApplyFunction']=lambda x: math.fabs(x)
if 'pROHRMeasureMarker' not in keys:
kwds['pROHRMeasureMarker']='.'
if 'pROHRMeasureRefSize' not in keys:
kwds['pROHRMeasureRefSize']=1.0
if 'pROHRMeasureSizeMin' not in keys:
kwds['pROHRMeasureSizeMin']=None
if 'pROHRMeasureColorMap' not in keys:
kwds['pROHRMeasureColorMap']=plt.cm.cool
if 'pROHRMeasureColorMapUsageStart' not in keys:
kwds['pROHRMeasureColorMapUsageStart']=0.
# NRCVs to be displayed
if 'pFIGNrcv' not in keys:
kwds['pFIGNrcv']=None #['KNOT~PKON-Knoten~\S*~\S+~QM']
if 'pFIGNrcvTxt' not in keys:
kwds['pFIGNrcvTxt']=None #['Kontrolle DH']
if 'pFIGNrcvFmt' not in keys:
kwds['pFIGNrcvFmt']='{:12s}: {:8.2f} {:6s}'
if 'pFIGNrcvPercFmt' not in keys:
kwds['pFIGNrcvPercFmt']=' {:6.1f}%'
if 'pFIGNrcvXStart' not in keys:
kwds['pFIGNrcvXStart']=.5
if 'pFIGNrcvYStart' not in keys:
kwds['pFIGNrcvYStart']=.5
# User Heat Balances to be displayed
if 'pFWVBGCategory' not in keys:
kwds['pFWVBGCategory']=None #['BLNZ1u5u7']
if 'pFWVBGCategoryUnit' not in keys:
kwds['pFWVBGCategoryUnit']='[kW]'
if 'pFWVBGCategoryCatFmt' not in keys:
kwds['pFWVBGCategoryCatFmt']='{:12s}: {:6.1f} {:4s}'
if 'pFWVBGCategoryPercFmt' not in keys:
kwds['pFWVBGCategoryPercFmt']=' {:6.1f}%'
if 'pFWVBGCategory3cFmt' not in keys:
kwds['pFWVBGCategory3cFmt']=' {:5d}/{:5d}/{:5d}'
if 'pFWVBGCategoryXStart' not in keys:
kwds['pFWVBGCategoryXStart']=.1
if 'pFWVBGCategoryYStart' not in keys:
kwds['pFWVBGCategoryYStart']=.9
# VICs
if 'pVICsDf' not in keys:
kwds['pVICsDf']=None #pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']})
if 'pVICsPercFmt' not in keys:
kwds['pVICsPercFmt']='{:12s}: {:6.1f}%'
if 'pVICsFmt' not in keys:
kwds['pVICsFmt']='{:12s}: {:6.1f} {:6s}'
if 'pVICsXStart' not in keys:
kwds['pVICsXStart']=.5
if 'pVICsYStart' not in keys:
kwds['pVICsYStart']=.1
# Figure
if 'pltTitle' not in keys:
kwds['pltTitle']='pltNetDHUS'
if 'figFrameon' not in keys:
kwds['figFrameon']=True
if 'figEdgecolor' not in keys:
kwds['figEdgecolor']='black'
if 'figFacecolor' not in keys:
kwds['figFacecolor']='white'
# Plausis
if kwds['pFWVBMeasure3Classes'] and not kwds['pFWVBMeasureCBFixedLimits']:
kwds['pFWVBMeasureCBFixedLimits']=True
logger.debug("{0:s}kwd {1:s} set to {2:s} because kwd {3:s}={4:s}".format(logStr,'pFWVBMeasureCBFixedLimits',str(kwds['pFWVBMeasureCBFixedLimits']),'pFWVBMeasure3Classes',str(kwds['pFWVBMeasure3Classes'])))
keys = sorted(kwds.keys())
logger.debug("{0:s}keys: {1:s}".format(logStr,str(keys)))
for key in keys:
value=kwds[key]
logger.debug("{0:s}kwd {1:s}: {2:s}".format(logStr,key,str(value)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
try:
# 2 Szenariumzeiten ermitteln ===============================================
firstTime=self.mx.df.index[0]
if isinstance(kwds['timeDeltaToRef'],pd.Timedelta):
timeRef=firstTime+kwds['timeDeltaToRef']
else:
logStrFinal="{:s}{:s} not Type {:s}.".format(logStr,'timeDeltaToRef','pd.Timedelta')
logger.error(logStrFinal)
raise RmError(logStrFinal)
if isinstance(kwds['timeDeltaToT'],pd.Timedelta):
timeT=firstTime+kwds['timeDeltaToT']
else:
logStrFinal="{:s}{:s} not Type {:s}.".format(logStr,'timeDeltaToT','pd.Timedelta')
logger.error(logStrFinal)
raise RmError(logStrFinal)
# Vektorergebnisse zu den 2 Zeiten holen ===============================================
timesReq=[]
timesReq.append(timeRef)
timesReq.append(timeT)
plotTimeDfs=self.mx.getMxsVecsFileData(timesReq=timesReq)
timeRefIdx=0
timeTIdx=1
# Sachdatenbasis ===============================================
vROHR=self.xm.dataFrames['vROHR']
vKNOT=self.xm.dataFrames['vKNOT']
vFWVB=self.xm.dataFrames['vFWVB']
vNRCV_Mx1=self.xm.dataFrames['vNRCV_Mx1']
if isinstance(kwds['pVICsDf'],pd.core.frame.DataFrame):
vFWVB=vFWVB.merge(kwds['pVICsDf'],left_on='NAME_i',right_on='Knotenname',how='left')
vFWVB.rename(columns={'Kundenname':'VIC'},inplace=True)
vFWVB.drop('Knotenname',axis=1,inplace=True)
# Einheit der Measures ermitteln (fuer Annotationen)
pFWVBMeasureCh=self.mx.mx1Df[self.mx.mx1Df['Sir3sID'].str.startswith(kwds['pFWVBMeasure'])]
pFWVBMeasureUNIT=pFWVBMeasureCh.iloc[0].UNIT
pFWVBMeasureATTRTYPE=pFWVBMeasureCh.iloc[0].ATTRTYPE
pROHRMeasureCh=self.mx.mx1Df[self.mx.mx1Df['Sir3sID'].str.startswith(kwds['pROHRMeasure'])]
pROHRMeasureUNIT=pROHRMeasureCh.iloc[0].UNIT
pROHRMeasureATTRTYPE=pROHRMeasureCh.iloc[0].ATTRTYPE
# Sachdaten annotieren mit Spalte Measure
# FWVB
pFWVBMeasureValueRaw=plotTimeDfs[timeTIdx][kwds['pFWVBMeasure']].iloc[0]
pFWVBMeasureValueRefRaw=plotTimeDfs[timeRefIdx][kwds['pFWVBMeasure']].iloc[0]
pFWVBMeasureValue=[None for m in pFWVBMeasureValueRaw]
pFWVBMeasureValueRef=[None for m in pFWVBMeasureValueRefRaw]
for idx in range(len(pFWVBMeasureValueRaw)):
mx2Idx=vFWVB['mx2Idx'].iloc[idx]
m=pFWVBMeasureValueRaw[mx2Idx]
pFWVBMeasureValue[idx]=m
m=pFWVBMeasureValueRefRaw[mx2Idx]
pFWVBMeasureValueRef[idx]=m
if kwds['pFWVBMeasureInRefPerc']: # auch in diesem Fall traegt die Spalte Measure das Ergebnis
pFWVBMeasureValuePerc=[float(m)/float(mRef) if float(mRef) >0 else 1 for m,mRef in zip(pFWVBMeasureValue,pFWVBMeasureValueRef)]
pFWVB=vFWVB.assign(Measure=pd.Series(pFWVBMeasureValuePerc)) #!
else:
pFWVB=vFWVB.assign(Measure=pd.Series(pFWVBMeasureValue)) #!
pFWVB=pFWVB.assign(MeasureOrig=pd.Series(pFWVBMeasureValue))
pFWVB=pFWVB.assign(MeasureRef=pd.Series(pFWVBMeasureValueRef))
# Sachdaten annotieren mit Spalte MCategory
pFWVBCat=[]
for index, row in pFWVB.iterrows():
if row.Measure >= kwds['pFWVBMeasureCBFixedLimitHigh']:
pFWVBCat.append(kwds['pMCatTopText'])
elif row.Measure <= kwds['pFWVBMeasureCBFixedLimitLow']:
pFWVBCat.append(kwds['pMCatBotText'])
else:
pFWVBCat.append(kwds['pMCatMidText'])
pFWVB=pFWVB.assign(MCategory=pd.Series(pFWVBCat))
# Sachdaten annotieren mit Spalte GCategory (mit den verlangten Waermebilanzen zu denen ein FWVB gehoert)
if isinstance(kwds['pFWVBGCategory'],list):
sCatReq=set(kwds['pFWVBGCategory'])
pFWVBCat=[]
for index, row in pFWVB.iterrows():
gCat=row.WBLZ
sCat=set(gCat)
s=sCat.intersection(sCatReq)
if len(s) == 0:
pFWVBCat.append('')
elif len(s) > 1:
pFWVBCat.append("{!s:s}".format(s))
else:
pFWVBCat.append(s.pop())
pFWVB=pFWVB.assign(GCategory=pd.Series(pFWVBCat))
else:
pFWVB=pFWVB.assign(GCategory=pd.Series())
# ROHR
pROHRMeasureValueRaw=plotTimeDfs[timeTIdx][kwds['pROHRMeasure']].iloc[0]
pROHRMeasureValue=[None for m in pROHRMeasureValueRaw]
for idx in range(len(pROHRMeasureValueRaw)):
mx2Idx=vROHR['mx2Idx'].iloc[idx]
m=pROHRMeasureValueRaw[mx2Idx]
mApplied=kwds['pROHRMeasureApplyFunction'](m)
pROHRMeasureValue[idx]=mApplied
pROHR=vROHR.assign(Measure=pd.Series(pROHRMeasureValue)) #!
# ========================================
# ROHR Attribute-Behandlung wg. float & Filter
# ========================================
pROHR[kwds['pROHRAttribute']]=pROHR[kwds['pROHRAttribute']].apply(kwds['pROHRAttributeApplyFunction'])
pROHR[kwds['pROHRAttribute']]=pROHR[kwds['pROHRAttribute']].fillna(kwds['pROHRAttributeApplyFunctionNaNValue']).astype(float)
# ROHRe filtern
row,col=pROHR.shape
logger.debug("{:s}pROHR vor filtern: Zeilen: {:d}".format(logStr,row))
f=kwds['pROHRFilterFunction']
logger.debug("{:s}pltROHR Filterfunktion: {:s}".format(logStr,str(f)))
pltROHR=pROHR[f] #!
row,col=pltROHR.shape
logger.debug("{:s}pltROHR nach filtern: Zeilen: {:d}".format(logStr,row))
# ========================================
# FWVB Attribute-Behandlung wg. float & Filter
# ========================================
pFWVB[kwds['pFWVBAttribute']]=pFWVB[kwds['pFWVBAttribute']].apply(kwds['pFWVBAttributeApplyFunction'])
pFWVB[kwds['pFWVBAttribute']]=pFWVB[kwds['pFWVBAttribute']].fillna(kwds['pFWVBAttributeApplyFunctionNaNValue']).astype(float)
# FWVB filtern
row,col=pFWVB.shape
logger.debug("{:s}pFWVB vor filtern: Zeilen: {:d}".format(logStr,row))
f=kwds['pFWVBFilterFunction']
logger.debug("{:s}pltFWVB Filterfunktion: {:s}".format(logStr,str(f)))
pltFWVB=pFWVB[f] #!
row,col=pltFWVB.shape
logger.debug("{:s}pltFWVB nach filtern: Zeilen: {:d}".format(logStr,row))
pltFWVB=pltFWVB[(pltFWVB[kwds['pFWVBAttribute']]<=pltFWVB[kwds['pFWVBAttribute']].quantile(kwds['quantil_pFWVBAttributeHigh']))
&
(pltFWVB[kwds['pFWVBAttribute']]>=pltFWVB[kwds['pFWVBAttribute']].quantile(kwds['quantil_pFWVBAttributeLow']))
]
logger.debug("{:s}pltROHR: quantil_pROHRAttributeHigh: {:f} f(): {:f}".format(logStr
,kwds['quantil_pROHRAttributeHigh']
,pltROHR[kwds['pROHRAttribute']].quantile(kwds['quantil_pROHRAttributeHigh'])
))
logger.debug("{:s}pltROHR: quantil_pROHRAttributeLow: {:f} f(): {:f}".format(logStr
,kwds['quantil_pROHRAttributeLow']
,pltROHR[kwds['pROHRAttribute']].quantile(kwds['quantil_pROHRAttributeLow'])
))
pltROHR=pltROHR[(pltROHR[kwds['pROHRAttribute']]<=pltROHR[kwds['pROHRAttribute']].quantile(kwds['quantil_pROHRAttributeHigh']))
&
(pltROHR[kwds['pROHRAttribute']]>=pltROHR[kwds['pROHRAttribute']].quantile(kwds['quantil_pROHRAttributeLow']))
]
row,col=pltROHR.shape
logger.debug("{:s}pltROHR nach selektieren: {:d}".format(logStr,row))
# Grundsortierung z-Order
pltFWVB=pltFWVB.sort_values(by=[kwds['pFWVBAttribute']],ascending=kwds['pFWVBAttributeAsc'])
pltROHR=pltROHR.sort_values(by=[kwds['pROHRAttribute']],ascending=kwds['pROHRAttributeAsc'])
# ############################################################
# ============================================================
# Plotten
# ============================================================
# ############################################################
pltNetFigAx(
pDf=pltROHR
,pXCor_i='pXCor_i' # colName
,pYCor_i='pYCor_i' # colName
,pXCor_k='pXCor_k' # colName
,pYCor_k='pYCor_k' # colName
,CBFraction=kwds['CBFraction']
,CBHpad=kwds['CBHpad']
,pltTitle=kwds['pltTitle']
,figFrameon=kwds['figFrameon']
#,figLinewidth=1.
,figEdgecolor=kwds['figEdgecolor']
,figFacecolor=kwds['figFacecolor']
)
fig = plt.gcf()
ax=plt.gca()
pFWVBrefSizeValue=pltFWVB[kwds['pFWVBAttribute']].std()
if pFWVBrefSizeValue < 1:
pFWVBrefSizeValue=pltFWVB[kwds['pFWVBAttribute']].mean()
logger.debug("{:s}pFWVBrefSizeValue (Attributwert): {:6.2f}".format(logStr,pFWVBrefSizeValue))
pFWVBSizeFactor=kwds['pFWVBAttributeRefSize']/pFWVBrefSizeValue
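# Scatter sizes are scaled so that the reference attribute value (std, or mean
# if the std is < 1) is drawn with an area of pFWVBAttributeRefSize pts^2.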
pcFWVB, CBLimitLow, CBLimitHigh = pltNetNodes(
# ALLG
pDf=pltFWVB
,pMeasure3Classes=kwds['pFWVBMeasure3Classes']
,CBFixedLimits=kwds['pFWVBMeasureCBFixedLimits']
,CBFixedLimitLow=kwds['pFWVBMeasureCBFixedLimitLow']
,CBFixedLimitHigh=kwds['pFWVBMeasureCBFixedLimitHigh']
# FWVB
,pMeasure='Measure'
,pAttribute=kwds['pFWVBAttribute']
,pSizeFactor=pFWVBSizeFactor
,pMeasureColorMap=kwds['pFWVBMeasureColorMap']
,pMeasureAlpha=kwds['pFWVBMeasureAlpha']
,pMeasureClip=kwds['pFWVBMeasureClip']
,pMCategory='MCategory'
,pMCatTopTxt=kwds['pMCatTopText'] # 'Top'
,pMCatBotTxt=kwds['pMCatBotText'] # 'Bottom'
,pMCatMidTxt=kwds['pMCatMidText'] # 'Middle'
,pMCatTopColor=kwds['pMCatTopColor']
,pMCatTopAlpha=kwds['pMCatTopAlpha']
,pMCatTopClip=kwds['pMCatTopClip']
,pMCatBotColor=kwds['pMCatBotColor']
,pMCatBotAlpha=kwds['pMCatBotAlpha']
,pMCatBotClip=kwds['pMCatBotClip']
,pMCatMidColorMap=kwds['pMCatMidColorMap']
,pMCatMidAlpha=kwds['pMCatMidAlpha']
,pMCatMidClip=kwds['pMCatMidClip']
)
#fig.sca(ax)
pROHRMeasureRefSizeValue=pltROHR['Measure'].std()
if pROHRMeasureRefSizeValue < 1:
pROHRMeasureRefSizeValue=pltROHR['Measure'].mean()
logger.debug("{:s}pROHRMeasureRefSizeValue: {:6.2f}".format(logStr,pROHRMeasureRefSizeValue))
pROHRMeasureSizeFactor=kwds['pROHRMeasureRefSize']/pROHRMeasureRefSizeValue
pROHRAttributeRefSizeValue=pltROHR[kwds['pROHRAttribute']].std()
if pROHRAttributeRefSizeValue < 1:
pROHRAttributeRefSizeValue=pltROHR[kwds['pROHRAttribute']].mean()
logger.debug("{:s}pROHRAttributeRefSizeValue: {:6.2f}".format(logStr,pROHRAttributeRefSizeValue))
pROHRAttributeSizeFactor=kwds['pROHRAttributeRefSize']/pROHRAttributeRefSizeValue
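# Analogous scaling for pipes: the line width reaches pROHRAttributeRefSize pts
# at the attribute's reference value and the marker size reaches
# pROHRMeasureRefSize pts at the measure's reference value.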
pltNetPipes(
pltROHR
,pAttribute=kwds['pROHRAttribute'] # Line
,pMeasure='Measure' # Marker
,pClip=False
,pAttributeLs=kwds['pROHRAttributeLs']
,pMeasureMarker=kwds['pROHRMeasureMarker']
,pAttributeColorMap=kwds['pROHRAttributeColorMap']
,pAttributeColorMapUsageStart=kwds['pROHRAttributeColorMapUsageStart']
,pAttributeSizeFactor=pROHRAttributeSizeFactor
,pAttributeSizeMin=kwds['pROHRAttributeSizeMin']
,pMeasureColorMap=kwds['pROHRMeasureColorMap']
,pMeasureColorMapUsageStart=kwds['pROHRMeasureColorMapUsageStart']
,pMeasureSizeFactor=pROHRMeasureSizeFactor
,pMeasureSizeMin=kwds['pROHRMeasureSizeMin']
)
# ============================================================
# Legend
# ============================================================
cax=pltNetLegendColorbar(
# ALLG
pc=pcFWVB # PathCollection aus pltNetNodes
,pDf=pltFWVB
,pMeasureInPerc=kwds['pFWVBMeasureInRefPerc']
,pMeasure3Classes=kwds['pFWVBMeasure3Classes']
# Ticks (TickLabels und TickValues)
,CBFixedLimits=kwds['pFWVBMeasureCBFixedLimits']
,CBFixedLimitLow=kwds['pFWVBMeasureCBFixedLimitLow']
,CBFixedLimitHigh=kwds['pFWVBMeasureCBFixedLimitHigh']
#
,pMeasure='Measure'
# Label
,pMeasureUNIT=pFWVBMeasureUNIT
,pMeasureTYPE=pFWVBMeasureATTRTYPE
# Geometrie
,CBFraction=kwds['CBFraction']
,CBHpad=kwds['CBHpad']
,CBLabelPad=kwds['CBLabelPad']
,CBTicklabelsHPad=kwds['CBTicklabelsHPad']
,CBAspect=kwds['CBAspect']
,CBShrink=kwds['CBShrink']
,CBAnchorHorizontal=kwds['CBAnchorHorizontal']
,CBAnchorVertical=kwds['CBAnchorVertical']
)
if kwds['pFWVBMeasure3Classes']:
bbTop, bbMid, bbBot = pltNetLegendColorbar3Classes(
pDf=pltFWVB
,pMCategory='MCategory'
,pMCatTopTxt=kwds['pMCatTopText']
,pMCatBotTxt=kwds['pMCatBotText']
,pMCatMidTxt=kwds['pMCatMidText']
,pMCatBotColor=kwds['pMCatBotColor']
,pMCatTopColor=kwds['pMCatTopColor']
,CBLe3cTopVPad=kwds['CBLe3cTopVPad']
,CBLe3cMidVPad=kwds['CBLe3cMidVPad']
,CBLe3cBotVPad=kwds['CBLe3cBotVPad']
,CBLe3cSySize=kwds['CBLe3cSySize']
,CBLe3cSyType=kwds['CBLe3cSyType']
)
TBAV=1.15*bbTop.y1
else:
TBAV=1.15
xmFileName,ext = os.path.splitext(os.path.basename(self.xm.xmlFile))
(wDir,modelDir,modelName,mx1File)=self.xm.getWDirModelDirModelName()
Projekt=self.xm.dataFrames['MODELL']['PROJEKT'].iloc[0]
Planer=self.xm.dataFrames['MODELL']['PLANER'].iloc[0]
Inst=self.xm.dataFrames['MODELL']['INST'].iloc[0]
Model="M: {:s}".format(xmFileName)
Result="E: {:s}".format(mx1File)
Times="TRef: {!s:s} T: {!s:s}".format(kwds['timeDeltaToRef'],kwds['timeDeltaToT']).replace('days','Tage')
pltNetLegendTitleblock(
text=str(Projekt)+'\n'+str(Planer)+'\n'+str(Inst)+'\n'+str(Model)+'\n'+str(Result)+'\n'+str(Times)
,anchorVertical=TBAV
)
# ============================================================
# NRCVs to be displayed in Net
# ============================================================
text=None
if isinstance(kwds['pFIGNrcv'],list) and isinstance(kwds['pFIGNrcvTxt'],list):
if len(kwds['pFIGNrcv']) == len(kwds['pFIGNrcvTxt']):
for idx,Sir3sIDRexp in enumerate(kwds['pFIGNrcv']):
try:
sCh=self.mx.mx1Df[self.mx.mx1Df['Sir3sID'].str.contains(Sir3sIDRexp)].iloc[0]
except:
logger.debug("{:s} Sir3sIDRexp {:s} nicht in .MX1".format(logStr,Sir3sIDRexp))
continue # NRCV wird ausgelassen
s=self.mx.df[sCh.Sir3sID]
v=s[timeT]
v0=s[timeRef]
if v0==0:
vp=100.
else:
vp=v/v0*100
fmtStr=kwds['pFIGNrcvFmt']
if kwds['pFWVBMeasureInRefPerc']:
fmtStr=fmtStr+kwds['pFIGNrcvPercFmt']
txt=fmtStr.format(kwds['pFIGNrcvTxt'][idx],v,sCh.UNIT,vp)
else:
txt=fmtStr.format(kwds['pFIGNrcvTxt'][idx],v,sCh.UNIT)
if text is None:
text=txt
else:
text=text+'\n'+txt
fig.sca(ax)
pltNetTextblock(text=text,x=kwds['pFIGNrcvXStart'],y=kwds['pFIGNrcvYStart'])
# ============================================================
# User Heat Balances to be displayed in Net
# ============================================================
vWBLZ=self.xm.dataFrames['vWBLZ']
vWBLZ_vKNOT=pd.merge(vWBLZ,vKNOT,left_on='OBJID',right_on='pk')
vWBLZ_vKNOT_pFWVB=pd.merge(vWBLZ_vKNOT,pFWVB,left_on='NAME_y',right_on='NAME_i')
import os
import lightgbm as lgb
import neptune
from neptunecontrib.monitoring.lightgbm import neptune_monitor
from neptunecontrib.versioning.data import log_data_version
from neptunecontrib.api.utils import get_filepaths
from neptunecontrib.monitoring.reporting import send_binary_classification_report
from neptunecontrib.monitoring.utils import pickle_and_send_artifact
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from src.utils import read_config, check_env_vars
from src.models.utils import sample_negative_class
from src.features.const import V1_CAT_COLS_FEATURES
check_env_vars()
CONFIG = read_config(config_path=os.getenv('CONFIG_PATH'))
neptune.init(project_qualified_name=CONFIG.project)
FEATURES_DATA_PATH = CONFIG.data.features_data_path
PREDICTION_DATA_PATH = CONFIG.data.prediction_data_path
SAMPLE_SUBMISSION_PATH = CONFIG.data.sample_submission_path
FEATURE_NAME = 'v1'
MODEL_NAME = 'lgbm'
NROWS = None
LOG_MODEL = True
SEED = 1234
VALIDATION_PARAMS = {'validation_schema': 'holdout',
'validation_fraction': 0.26}
MODEL_PARAMS = {'num_leaves': 256,
'min_child_samples': 79,
'objective': 'binary',
'max_depth': 15,
'learning_rate': 0.02,
"boosting_type": "gbdt",
"subsample_freq": 3,
"subsample": 0.9,
"bagging_seed": 11,
"metric": 'auc',
"verbosity": -1,
'reg_alpha': 0.3,
'reg_lambda': 0.3,
'colsample_bytree': 0.9,
'seed': 1234
}
TRAINING_PARAMS = {'nrows': NROWS,
'negative_sample_fraction': 1.0,
'negative_sample_seed': SEED,
'num_boosting_rounds': 200, # 5000,
'early_stopping_rounds': 20 # 200
}
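# NOTE: num_boosting_rounds / early_stopping_rounds are reduced here for quick
# runs; the commented values (5000 / 200) are the full-training settings.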
def fit_predict(train, valid, test, model_params, training_params, fine_tuning=False, log_model=False):
X_train = train.drop(['isFraud', 'TransactionDT', 'TransactionID'], axis=1)
y_train = train['isFraud']
X_valid = valid.drop(['isFraud', 'TransactionDT', 'TransactionID'], axis=1)
y_valid = valid['isFraud']
trn_data = lgb.Dataset(X_train, y_train)
val_data = lgb.Dataset(X_valid, y_valid)
if fine_tuning:
callbacks = None
else:
callbacks = [neptune_monitor()]
clf = lgb.train(model_params, trn_data,
training_params['num_boosting_rounds'],
feature_name=X_train.columns.tolist(),
categorical_feature=V1_CAT_COLS_FEATURES,
valid_sets=[trn_data, val_data],
early_stopping_rounds=training_params['early_stopping_rounds'],
callbacks=callbacks)
valid_preds = clf.predict(X_valid, num_iteration=clf.best_iteration)
if log_model:
pickle_and_send_artifact(clf, 'lightgbm.pkl')
if fine_tuning:
return valid_preds
else:
train_preds = clf.predict(X_train, num_iteration=clf.best_iteration)
X_test = test.drop(['TransactionDT', 'TransactionID'], axis=1)
test_preds = clf.predict(X_test, num_iteration=clf.best_iteration)
return train_preds, valid_preds, test_preds
def fmt_preds(y_pred):
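# turn P(fraud) into an (n, 2) array of [P(not fraud), P(fraud)], the layout
# expected by send_binary_classification_report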
return np.concatenate((1.0 - y_pred.reshape(-1, 1), y_pred.reshape(-1, 1)), axis=1)
def main():
print('loading data')
train_features_path = os.path.join(FEATURES_DATA_PATH, 'train_features_' + FEATURE_NAME + '.csv')
test_features_path = os.path.join(FEATURES_DATA_PATH, 'test_features_' + FEATURE_NAME + '.csv')
print('... train')
train = pd.read_csv(train_features_path, nrows=TRAINING_PARAMS['nrows'])
print('... test')
test = pd.read_csv(test_features_path, nrows=TRAINING_PARAMS['nrows'])
idx_split = int((1 - VALIDATION_PARAMS['validation_fraction']) * len(train))
train, valid = train[:idx_split], train[idx_split:]
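# holdout split on row order: the first (1 - validation_fraction) of the rows
# stay in train, the trailing rows (the most recent transactions if the file is
# sorted by TransactionDT) become the validation set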
train = sample_negative_class(train,
fraction=TRAINING_PARAMS['negative_sample_fraction'],
seed=TRAINING_PARAMS['negative_sample_seed'])
hyperparams = {**MODEL_PARAMS, **TRAINING_PARAMS, **VALIDATION_PARAMS}
print('starting experiment')
with neptune.create_experiment(name='model training',
params=hyperparams,
upload_source_files=get_filepaths(),
tags=[MODEL_NAME, 'features_{}'.format(FEATURE_NAME), 'training']):
print('logging data version')
log_data_version(train_features_path, prefix='train_features_')
log_data_version(test_features_path, prefix='test_features_')
print('training')
train_preds, valid_preds, test_preds = fit_predict(train, valid, test, MODEL_PARAMS, TRAINING_PARAMS,
log_model=LOG_MODEL)
print('logging metrics')
train_auc = roc_auc_score(train['isFraud'], train_preds)
valid_auc = roc_auc_score(valid['isFraud'], valid_preds)
neptune.send_metric('train_auc', train_auc)
neptune.send_metric('valid_auc', valid_auc)
send_binary_classification_report(valid['isFraud'], fmt_preds(valid_preds),
channel_name='valid_classification_report')
print('postprocessing predictions')
valid_predictions_path = os.path.join(PREDICTION_DATA_PATH,
'valid_prediction_{}_{}.csv'.format(FEATURE_NAME, MODEL_NAME))
test_predictions_path = os.path.join(PREDICTION_DATA_PATH,
'test_prediction_{}_{}.csv'.format(FEATURE_NAME, MODEL_NAME))
submission_path = os.path.join(PREDICTION_DATA_PATH,
'submission_{}_{}.csv'.format(FEATURE_NAME, MODEL_NAME))
submission = pd.read_csv(SAMPLE_SUBMISSION_PATH)
valid = pd.concat([valid.reset_index(drop=True), pd.DataFrame(valid_preds, columns=['prediction'])],
                  axis=1)
test = pd.concat([test[["TransactionDT", 'TransactionID']], pd.DataFrame(test_preds, columns=['prediction'])],
axis=1)
submission['isFraud'] = pd.merge(submission, test, on='TransactionID')['prediction']
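# Plausible continuation (assumption, not from the source): persist the
# artifacts to the paths prepared above, e.g.
#   valid.to_csv(valid_predictions_path, index=False)
#   test.to_csv(test_predictions_path, index=False)
#   submission.to_csv(submission_path, index=False)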
import pandas as pd
from multiprocessing import Pool
import logging
from src.helper import create_logger
# Create the logger object
logger = create_logger('Parser', 'logs/Hedging.log',
logging.DEBUG, logging.WARNING)
class Parser():
"""
Parser class that wraps the caller, divides the data and
applies factor construction using multiprocessing
"""
def __init__(self, caller):
"""
Args:
caller (obj): caller class object
"""
self.factor = []
self.call = caller
def import_plugins(self, *args, **kwargs):
# Check bottom code to read a file and all the classes present in it.
return 0
# def find_all_factor(self):
# # Find factors in factors folder and their requirement
# # lis = os.listdir("src/hedging/factors/")
# self.factors = self.import_plugins(
# "src/hedging/factors/", create_instance=False, filter_abstract=False)
# return self.factors
# # a = self.import_submodules("src/hedging/factors/")
def run_factor(self, class_name, baby, time_chunk):
"""Create Factors
Args:
class_name : class in focus
baby (obj): Child Object
time_chunk (list/tuple): Time Deciles
Returns:
[type]: [description]
"""
obj = class_name(baby, market_name="^GSPC")
pool = Pool()
try:
results = pool.map(obj.calculate, list(time_chunk))
except:
pool.close()
return pd.DataFrame()
pool.close()
pool.join()
df = pd.DataFrame()
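# Sketch of the likely continuation (assumption, not from the source): the
# per-chunk results returned by pool.map() would typically be combined into the
# frame created above, e.g. df = pd.concat(results, ignore_index=True), before
# df is returned.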
__author__ = "unknow"
__copyright__ = "Sprace.org.br"
__version__ = "1.0.0"
import pandas as pd
import sys
from math import sqrt
import sys
import os
import ntpath
import scipy.stats
import seaborn as sns
from matplotlib import pyplot as plt
#sys.path.append('/home/silvio/git/track-ml-1/utils')
#sys.path.append('../')
from core.utils.tracktop import *
#def create_graphic(reconstructed_tracks, original_tracks, tracks_diffs):
def create_graphic_org(**kwargs):
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('path_original_track'):
path_original_track = kwargs.get('path_original_track')
if kwargs.get('tracks'):
tracks = kwargs.get('tracks')
dfOriginal = pd.read_csv(original_tracks)
# dfOriginal2=dfOriginal.iloc(10:,:)
track_plot_new(dfOriginal, track_color = 'blue', n_tracks = tracks, title = 'Original to be Reconstructed', path=path_original_track)
#def create_graphic(reconstructed_tracks, original_tracks, tracks_diffs):
def create_graphic(**kwargs):
if kwargs.get('reconstructed_tracks'):
reconstructed_tracks = kwargs.get('reconstructed_tracks')
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('tracks_diffs'):
tracks_diffs = kwargs.get('tracks_diffs')
if kwargs.get('path_original_track'):
path_original_track = kwargs.get('path_original_track')
if kwargs.get('path_recons_track'):
path_recons_track = kwargs.get('path_recons_track')
'''
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs=dftracks_diffs_aux.iloc[:,28:29]
dfOriginal = pd.read_csv(original_tracks)
dfaux3=dfOriginal.drop(dfOriginal.columns[0], axis=1)
dfaux32=dfaux3.drop(dfaux3.columns[0], axis=1)
dfaux32.insert(174, "diff", dftracks_diffs, True)
org = dfaux32.sort_values(by=['diff'])
dfRecons = pd.read_csv(reconstructed_tracks)
dfaux33=dfRecons.drop(dfRecons.columns[0], axis=1)
dfaux22=dfaux33.drop(dfaux33.columns[0], axis=1)
dfaux22.insert(65, "diff", dftracks_diffs, True)
org2 = dfaux22.sort_values(by=['diff'])
'''
dfRecons = pd.read_csv(reconstructed_tracks)
dfOriginal = pd.read_csv(original_tracks)
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs=dftracks_diffs_aux.iloc[:,28:29]
dfRecons.insert(171, "diff", dftracks_diffs, True)
dfOriginal.insert(171, "diff", dftracks_diffs, True)
recaux=dfRecons.iloc[:,12:]
orgaux=dfOriginal.iloc[:,11:]
org = orgaux.sort_values(by=['diff'])
rec = recaux.sort_values(by=['diff'])
track_plot_new(org, track_color = 'blue', n_tracks = 19, title = 'Original to be Reconstructed', path=path_original_track)
#track_plot(org, track_color = 'blue', n_tracks = 20, title = 'Original to be Reconstructed', path=path_recons_track)
track_plot_new(rec, track_color = 'red', n_tracks = 19, title = 'reconstructed LSTM', path=path_recons_track)
#track_plot(org2, track_color = 'red', n_tracks = 20, title = 'reconstructed LSTM', path=path_original_track)
#def create_histogram(tracks_diffs):
def create_histogram(**kwargs):
if kwargs.get('tracks_diffs'):
tracks_diffs = kwargs.get('tracks_diffs')
if kwargs.get('path_hist'):
path_hist = kwargs.get('path_hist')
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs = dftracks_diffs_aux.iloc[:,28:29]
dftracks_diffs_aux[29] = (dftracks_diffs_aux.iloc[:,28:29]/10)
dfff=dftracks_diffs_aux.sort_values(by=[29])
track_plot_hist(dfff.iloc[:,-1:], title = "AVG distance - Real x LSTM",
x_title = "Average of Distance (cm)",
y_title = "frequency",
n_bins = 20, bar_color = 'indianred',
path = path_hist)
#def create_histogram_seaborn(tracks_diffs,outputfig):
def create_histogram_seaborn(**kwargs):
if kwargs.get('tracks_diffs'):
tracks_diffs = kwargs.get('tracks_diffs')
if kwargs.get('outputfig'):
outputfig = kwargs.get('outputfig')
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs = dftracks_diffs_aux.iloc[:,28:29]
dftracks_diffs_aux[29] = (dftracks_diffs_aux.iloc[:,28:29]/10)
dfff=dftracks_diffs_aux.sort_values(by=[29])
#sns_plot = sns.distplot(dfEval.iloc[:,[27]])
sns_plot = sns.distplot(dfff.iloc[:,-1:])
sns_plot.set(xlabel='Average Distance in MM', ylabel='Frequency')
plt.savefig(outputfig)
#def create_diference_per_track(reconstructed_tracks, original_tracks, eval_file):
def create_diference_per_track(**kwargs):
if kwargs.get('reconstructed_tracks'):
reconstructed_tracks = kwargs.get('reconstructed_tracks')
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('eval_file'):
eval_file = kwargs.get('eval_file')
dfOriginal = pd.read_csv(original_tracks)
dfReconstructed = pd.read_csv(reconstructed_tracks)
lines = dfOriginal.shape[0]
columns = dfOriginal.shape[1]
dfEval = pd.DataFrame(index=range(lines),columns=range(29))
ind_dfEval=0
#for hit in range(1, 28):
for hit in range(1, 16):
print(hit)
print(dfOriginal.shape)
print(dfReconstructed.shape)
#original track
dataOR=dfOriginal.iloc[:, [ (hit*8)+2,(hit*8)+3,(hit*8)+4 ]]
#reconstructed track
dataRE=dfReconstructed.iloc[:, [ (hit*8)+2,(hit*8)+3,(hit*8)+4 ]]
dftemp = pd.DataFrame(index=range(lines),columns=range(7))
dftemp[0]=dataOR.iloc[:,[0]]
dftemp[1]=dataOR.iloc[:,[1]]
dftemp[2]=dataOR.iloc[:,[2]]
dftemp[3]=dataRE.iloc[:,[0]]
dftemp[4]=dataRE.iloc[:,[1]]
dftemp[5]=dataRE.iloc[:,[2]]
dftemp[6]= (((dftemp[0]-dftemp[3])**2)+((dftemp[1]-dftemp[4])**2)+((dftemp[2]-dftemp[5])**2)).pow(1./2)
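# Euclidean distance between the original and the reconstructed hit position;
# equivalent to np.linalg.norm(dftemp[[0,1,2]].values - dftemp[[3,4,5]].values, axis=1)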
# DataFrame with the geometric (Euclidean) distance from hit to hit
dfEval[ind_dfEval] = dftemp[6]
ind_dfEval=ind_dfEval+1
ind=27
col = dfEval.loc[: , 0:26]
dfEval[27] = col.mean(axis=1)
dfEval.to_csv(eval_file)
def create_input_data(**kwargs):
# this function selects a specific number of tracks with a specific amount of hits
maximunAmountofHitsinDB=20
columnsperhit=8
firstColumnAfterParticle=9
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
if kwargs.get('aux_am_per_hit'):
aux_am_per_hit = kwargs.get('aux_am_per_hit')
if kwargs.get('min'):
min = kwargs.get('min')
if kwargs.get('max'):
max = kwargs.get('max')
if kwargs.get('maximunAmountofHitsinDB'):
maximunAmountofHitsinDB = kwargs.get('maximunAmountofHitsinDB')
if kwargs.get('columnsperhit'):
columnsperhit = kwargs.get('columnsperhit')
if kwargs.get('firstColumnAfterParticle'):
firstColumnAfterParticle = kwargs.get('firstColumnAfterParticle')
nrowsvar=1500000 #1000 #500000
skip=0
totdf = pd.read_csv(event_prefix,skiprows=0,nrows=1)
totdf = totdf.iloc[0:0]
for z in range(1):
dfOriginal = pd.read_csv(event_prefix,skiprows=skip,nrows=nrowsvar)
#print(dfOriginal.shape)
#dfOriginal2=dfOriginal.iloc[:,7:]
dfOriginal2=dfOriginal.iloc[:,firstColumnAfterParticle:]
#print(dfOriginal2)
#all zero columns in a single line
dfOriginal['totalZeros'] = (dfOriginal2 == 0.0).sum(axis=1)
dfOriginal['totalHits'] = maximunAmountofHitsinDB-(dfOriginal['totalZeros']/columnsperhit)
dfOriginal["totalHits"] = dfOriginal["totalHits"].astype(int)
#print("min: " , dfOriginal.iloc[:,:-1].min())
#print("max: " , dfOriginal.iloc[:,:-1].max())
print(int(min))
print(int(max)+1)
#print(dfOriginal['totalZeros'])
#print(dfOriginal['totalHits'])
for i in range(int(min), (int(max)+1)):
#print("i: " , i)
auxDF = dfOriginal.loc[dfOriginal['totalHits'] == i]
auxDF2 = auxDF.iloc[0:aux_am_per_hit,:]
totdf = totdf.append(auxDF2, sort=False)
print("auxDF2.shape: ", i, auxDF2.shape)
dfOriginal = dfOriginal.iloc[0:0]
auxDF2 = auxDF2.iloc[0:0]
auxDF = auxDF.iloc[0:0]
skip=skip+nrowsvar
totdf.drop('totalHits', axis=1, inplace=True)
totdf.drop('totalZeros', axis=1, inplace=True)
totdf.to_csv(output_prefix, index = False)
def put_each_hit_in_a_single_line_train(**kwargs):
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
print("event_prefix ", event_prefix, " output_prefix ", output_prefix)
auxdf3 = pd.DataFrame()
totfinaldfaux = pd.DataFrame()
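# Reshaping sketch for the function above (assumption, not from the source; the
# column-layout constants are borrowed from create_input_data for illustration):
# one row per hit could be built by slicing
#   dfOriginal.iloc[:, firstColumnAfterParticle + hit*columnsperhit :
#                      firstColumnAfterParticle + (hit+1)*columnsperhit]
# for each hit and stacking the slices with pd.concat(..., ignore_index=True).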
import pandas as pd
import numpy as np
import argparse
labels = [
"0",
"B-answer",
"I-answer",
]
def find_answer_start(answer, sent):
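# Returns the token index in `sent` at which `answer` starts, or None if no
# match is found; matching is case-insensitive and treats each answer token as
# a substring of the corresponding sentence token.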
answer = [x.lower() for x in answer]
sent = [x.lower() for x in sent]
for idx, word in enumerate(sent):
if answer[0] in word:
is_match = True
if len(answer) > 1:
for i in range(1, len(answer)):
c_len = idx+i
if c_len < len(sent):
c_ans = answer[i]
c_word = sent[idx+i]
if c_ans not in c_word:
is_match = False
if is_match:
return idx
return None
def get_tokens_and_labels(sentences, answer, sent_with_ans_id):
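# Concatenates all sentence tokens into a single context and builds an IOB-style
# label sequence: 0 = outside, 1 = "B-answer" (answer start), 2 = "I-answer"
# (inside the answer), matching the `labels` list at the top of the file.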
context_text = []
all_labels = []
# get the labels and tokens for current sentence
for idx, sent in enumerate(sentences):
context_text += sent # concatenate all sentences to a list of consecutive tokens
labels = np.zeros(len(sent))
if idx == sent_with_ans_id:
# the answer is contained in this sentence!
idx_s = find_answer_start(answer, sent)
if idx_s != None:
labels[idx_s] = 1
for i in range(len(answer)-1):
labels[idx_s + i + 1] = 2
else:
print('ERROR: could not find start of answer..')
print('ans: ', answer)
print('sentence: ', sent)
all_labels.append(labels)
l = np.concatenate(all_labels).ravel()
return context_text, l
def label_data(df):
data_map = {}
num_removed = 0
num_exact_match = 0
for index, row in df.iterrows():
sentences = row['context_raw']
sent_with_ans_id = row['answer_location']
answer = row['correct_answer_raw']
context_id = row['context_id']
context_text, labels = get_tokens_and_labels(sentences, answer, sent_with_ans_id)
# check if the current text is in the data map, and update the labels accordingly!
if row['context'] in data_map:
old_point = data_map[row['context']]
o_labels = old_point['labels'].copy()
add_answer_label = True
for idx, label in enumerate(labels):
if label > 0:
if o_labels[idx] == 0:
o_labels[idx] = label
elif label != o_labels[idx]:
# labels overlap, but are not an exact match
# -> don't add the current answer.
if add_answer_label: # only count the first for each answer
num_removed += 1
add_answer_label = False # this means the answers are overlapping, but not equal! -> don't want this
print('answer: ', answer)
print('existing point: ', old_point)
else:
# labels match
# -> don't add the current answer.
if add_answer_label:
num_exact_match += 1
add_answer_label = False
if add_answer_label:
old_point['labels'] = o_labels
old_point['answers'].append(answer)
data_map[row['context']] = old_point
else:
data_point = { 'context_id': context_id, 'id': index, 'labels': labels, 'tokens': context_text, 'answers': [answer] }
data_map[row['context']] = data_point
print('number of overlapping answers (removed): ', num_removed)
print('number of exact matched: ', num_exact_match)
for v in data_map.values():
v['labels'] = [ int(x) for x in v['labels']]
labeled_data = list(data_map.values())
print('num labeled data points: ', len(labeled_data))
return labeled_data
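# Hedged illustration added by the editor (not part of the original script); the toy row below is made up,
# but the column names follow the usage in label_data above. Labels are 0 = outside, 1 = B-answer, 2 = I-answer.
def _example_label_data():
    toy = pd.DataFrame([{
        'context_id': 0,
        'context': 'paris is the capital of france .',
        'context_raw': [['paris', 'is', 'the', 'capital', 'of', 'france', '.']],
        'answer_location': 0,
        'correct_answer_raw': ['paris'],
    }])
    # Returns one record per unique context, e.g. labels == [1, 0, 0, 0, 0, 0, 0] for this row
    return label_data(toy)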
def main(args):
df = | pd.read_pickle(args.data_path) | pandas.read_pickle |
import sys
import numpy as np
import pandas as pd
import wgdi.base as base
class karyotype_mapping():
def __init__(self, options):
self.position = 'order'
self.limit_length = 5
for k, v in options:
setattr(self, str(k), v)
print(str(k), ' = ', v)
def karyotype_left(self, pairs, ancestor, gff1, gff2):
for index, row in ancestor.iterrows():
index1 = gff1[(gff1['chr'] == row[0]) & (
gff1['order'] >= row[1]) & (gff1['order'] <= row[2])].index
gff1.loc[index1, 'color'] = row[3]
gff1.loc[index1, 'classification'] = row[4]
data = pd.merge(pairs, gff1, left_on=0,
right_on=gff1.index, how='left')
data.drop_duplicates(subset=[1], inplace=True)
data.index = data[1].values
gff2.loc[data.index, 'color'] = data['color']
gff2.loc[data.index, 'classification'] = data['classification']
return gff2
def karyotype_top(self, pairs, ancestor, gff1, gff2):
for index, row in ancestor.iterrows():
index1 = gff2[(gff2['chr'] == row[0]) & (
gff2['order'] >= row[1]) & (gff2['order'] <= row[2])].index
gff2.loc[index1, 'color'] = row[3]
gff2.loc[index1, 'classification'] = row[4]
data = pd.merge(pairs, gff2, left_on=1,
right_on=gff2.index, how='left')
data.drop_duplicates(subset=[0], inplace=True)
data.index = data[0].values
gff1.loc[data.index, 'color'] = data['color']
gff1.loc[data.index, 'classification'] = data['classification']
return gff1
def karyotype_map(self, gff, lens):
gff = gff[gff['chr'].isin(lens.index)]
gff = gff[gff['color'].notnull()]
ancestor = []
for chr, group in gff.groupby(['chr']):
color, classid, arr = '', 1, []
for index, row in group.iterrows():
if color == row['color'] and classid == row['classification']:
arr.append(row['order'])
else:
if len(arr) >= int(self.limit_length):
ancestor.append(
[chr, min(arr), max(arr), color, classid, len(arr)])
arr = []
color = row['color']
classid = row['classification']
if len(ancestor) >= 1 and color == ancestor[-1][3] and classid == ancestor[-1][4] and chr == ancestor[-1][0]:
arr.append(ancestor[-1][1])
arr += np.random.randint(
ancestor[-1][1], ancestor[-1][2], size=ancestor[-1][5]-1).tolist()
ancestor.pop()
arr.append(row['order'])
if len(arr) >= int(self.limit_length):
ancestor.append(
[chr, min(arr), max(arr), color, classid, len(arr)])
ancestor = pd.DataFrame(ancestor)
for chr, group in ancestor.groupby([0]):
ancestor.loc[group.index[0], 1] = 1
ancestor.loc[group.index[-1], 2] = lens[chr]
ancestor[4] = ancestor[4].astype(int)
return ancestor[[0,1,2,3,4]]
def colinear_gene_pairs(self, bkinfo, gff1, gff2):
data = []
bkinfo = bkinfo.sort_values(by=['length'], ascending=[True])
for index, row in bkinfo.iterrows():
b1 = list(map(int, row['block1'].split('_')))
b2 = list(map(int, row['block2'].split('_')))
newgff1 = gff1[(gff1['chr'] == row['chr1'])
& (gff1['order'].isin(b1))]
newgff2 = gff2[(gff2['chr'] == row['chr2'])
& (gff2['order'].isin(b2))]
for i in range(len(b1)):
a, b = newgff1.loc[newgff1['order'] == b1[i]
].index[0], newgff2.loc[newgff2['order'] == b2[i]].index[0]
data.append([a, b])
data = pd.DataFrame(data)
return data
def run(self):
bkinfo = | pd.read_csv(self.blockinfo, index_col='id') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os, platform, copy
import logging, re
import pandas as pd
import numpy as np
import itertools
import time, random
from tqdm import tqdm
tqdm.pandas()
from conf import getworkdir, conf
from models import run_model, CoxNMF_initialization
from utils import feature_analysis_no_fig
from dataset import getdata
from plotting import plot_simulation_atlas, plot_TCGA_atlas
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
from main import prepare_results
from models import run_model_and_save_logs
def average_by_seeds(r):
if np.sum(r['Success'].astype(bool)) < len(r):
# One of the seeds doesn't generate results (failed to converge). Abandon all seeds.
return None
rmean = r.mean(skipna=True)
rstd = r.std(skipna=True)
r2 = r.iloc[0,:]
r2['random_seed'] = 'averaged'
for c in ['Fnorm', 'relative_error','CIndex','Runtime','silhouette_score',
'Accuracy','precision','recall','F1_score','IoU','Dice coefficient']:
r2.loc[c] = (rmean.loc[c], rstd.loc[c])
return r2
def find_k_hat(x):
imax = np.argmax([m for (m, v) in x['silhouette_score']])
x2 = x.iloc[imax,:]
return x2
def model_selection(x, metric='CIndex'):
    # x holds all hyperparameter combinations tried for one model; the best row by the chosen metric is kept
if metric == 'CIndex' or metric == 'silhouette_score':
idx = np.argmax([m for (m, v) in x[metric]])
x2 = x.iloc[idx,:]
return x2
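# Hedged sketch added for illustration (not part of the original pipeline); the alpha values are made up.
# After average_by_seeds, metric columns hold (mean, std) tuples, and model_selection keeps the row
# whose mean metric is largest.
def _example_model_selection():
    toy = pd.DataFrame({'alpha': [0.1, 1.0],
                        'CIndex': [(0.62, 0.01), (0.71, 0.02)]})
    return model_selection(toy, metric='CIndex')  # returns the row with alpha == 1.0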
def get_final_perfomance(args, metric='CIndex'):
# st = time.time()
if args.dataset == 'simulation':
folder = os.path.join(args.result_folder,
'Simulation=%s' % args.simulation_type,
'deathrate=%.2f_Wdistribution=%s_citol=%.2f_maxiter=%d' % (args.death_rate, args.W_distribution, args.ci_tol, args.max_iter),
'K=%d_P=%d_N=%d' % (args.K, args.P, args.N),
'Noise_X=%.2f_W=%.2f_H=%.2f' % (args.X_noise, args.W_noise, args.H_noise)
)
elif args.dataset == 'TCGA':
folder = os.path.join(args.result_folder,
'Cancer=%s' % args.cancer,
'meanq=%.2f_varq=%.2f_P=%d_N=%d_citol=%.2f_maxiter=%d' % \
(args.mean_quantile, args.var_quantile, args.P, args.N, args.ci_tol, args.max_iter)
)
if not os.path.exists(folder): print('Folder not exists!')
df = None
if args.dataset == 'simulation':
for seed in args.seed_list:
df_dir = os.path.join(folder, 'result_df_seed=%d.csv' % seed)
if not os.path.exists(df_dir):
print(folder)
raise Exception('All: Seed = %d path not exists!' % seed)
continue
if df is None:
df = pd.read_csv(df_dir, index_col = 0)
else:
df = df.append(pd.read_csv(df_dir, index_col = 0),ignore_index=True)
elif args.dataset == 'TCGA':
for K_hat in args.K_hat_list:
for seed in args.seed_list:
df_dir = os.path.join(folder, 'result_df_Khat=%d_seed=%d.csv' % (K_hat, seed))
if not os.path.exists(df_dir):
print(folder)
raise Exception('K_hat = %d Seed = %d path not exists!' % (K_hat, seed))
continue
if df is None:
df = pd.read_csv(df_dir, index_col = 0)
else:
df = df.append(pd.read_csv(df_dir, index_col = 0),ignore_index=True)
df = df.fillna(-1)
# =============================================================================
# Average by seeds
# =============================================================================
# print('Averaging results by seeds ...')
df_seed = df.groupby(['Model','K', 'K_hat','alpha','penalizer',
'W_H_initialization','W_normalization','H_normalization','beta_normalization',
]).apply(lambda r: average_by_seeds(r)).reset_index(drop=True)
df_seed.dropna(axis=0, how='any', inplace=True)
# =============================================================================
# This step help each model find their own best alpha & penalizer w.r.t. CIndex/relative_error
# =============================================================================
df_best_mdl = df_seed.groupby(['Model','K', 'K_hat', # For every K_hat, we select the best model w.r.t. CIndex/relative_error
]).apply(lambda x: model_selection(x, metric)).reset_index(drop=True)
# =============================================================================
# Upon their optimal parameter determined, find their optimal K_hat final solutions.
# =============================================================================
# print('Finding optimal K_hat w.r.t. CIndex ...')
df_khat = df_best_mdl.groupby(['Model',
]).apply(lambda x: find_k_hat(x)).reset_index(drop=True)
# print('Total time elapsed: %.2f seconds.' % (time.time() - st))
return df, df_seed, df_best_mdl, df_khat
def generate_main_table(args,
table,
metrics,
models,
X_noise,
metric='CIndex'):
coxnmf_params = pd.DataFrame(index = args.K_list, columns = ['Simulation type','X_noise','K','K_hat','alpha','penalizer','initialization'])
for K in args.K_list:
print('K = %.2f' % K)
args.K = K
args.X_noise = X_noise
X, W, H, t, e, labels = getdata(args)
args.P, args.N = X.shape
_, _, _, df_khat = get_final_perfomance(args, metric=metric)
for mo in models:
row = df_khat.loc[df_khat['Model'] == mo]
for me in metrics:
try:
value = row.loc[:,me].values[0]
except:
print('No metric %s found for %s' % (me, mo))
if isinstance(value, tuple):
value = '%.4f±%.2f' % (value[0],value[1])
table.loc[(me, mo),('X_noise=%.2f' % X_noise, '%d' % K)] = value
alpha = df_khat.loc[df_khat['Model'] == 'CoxNMF','alpha'].values[0]
penalizer = df_khat.loc[df_khat['Model'] == 'CoxNMF','penalizer'].values[0]
initialization = df_khat.loc[df_khat['Model'] == 'CoxNMF','W_H_initialization'].values[0]
K_hat = df_khat.loc[df_khat['Model'] == 'CoxNMF','K_hat'].values[0]
coxnmf_params.loc[K, :] = args.simulation_type, args.X_noise, K, K_hat, alpha, penalizer, initialization
return table, coxnmf_params
def generate_table_supplementary(args,
table,
metrics,
models,
metric='CIndex'):
alphalist = []
for K in args.K_list:
print('K = %.2f' % K)
for X_noise in args.X_noise_list:
print(' X_noise = %.2f' % X_noise)
args.K = K
args.X_noise = X_noise
X, W, H, t, e, labels = getdata(args)
args.P, args.N = X.shape
_, _, _, df_khat = get_final_perfomance(args, metric=metric)
alphalist += list(df_khat.iloc[:4,6].values)
for mo in models:
if mo == '':
table.loc[(K,mo),:] = ''
continue
if 'CoxNMF' in mo:
row = df_khat.loc[df_khat['Model'] == mo]
else:
row = df_khat.loc[df_khat['Model'] == mo]
for me in metrics:
if me == '':
table.loc[:,('X_noise=%.2f' % X_noise, me)] = ''
continue
value = row.loc[:,me].values[0]
if isinstance(value, tuple):
value = '%.4f±%.2f' % (value[0],value[1])
table.loc[(K,mo),('X_noise=%.2f' % X_noise, me)] = value
return table
def generate_table_massive_performances(args,
metric='CIndex'):
table = None
for K in args.K_list:
print('K = %.2f' % K)
for X_noise in args.X_noise_list:
print(' X_noise = %.2f' % X_noise)
args.K = K
args.X_noise = X_noise
X, W, H, t, e, labels = getdata(args)
args.P, args.N = X.shape
_, df_seed, _, _ = get_final_perfomance(args, metric=metric)
if table is None:
table = df_seed
else:
table = table.append(df_seed, ignore_index=True)
for r in range(table.shape[0]):
for c in range(table.shape[1]):
value = table.iloc[r,c]
if isinstance(value, tuple):
value = '%.4f±%.2f' % (value[0],value[1])
table.iloc[r,c] = value
return table
def get_simulation_performance(args,
table,
metric='CIndex'):
# =============================================================================
# Table 2-3: Univariate Multivariate of accuracy and relative error
# =============================================================================
models = ['TruncatedSVD','PCA','SparsePCA','NNDSVD','FactorAnalysis',
'NMF (CD)','NMF (MU)','SNMF','CoxNMF']
metrics = ['CIndex','relative_error','Accuracy','IoU','Dice coefficient']
coxnmf_params_all = pd.DataFrame()
for X_noise in args.X_noise_list:
print('X_noise = %.2f' % X_noise)
col1 = ['X_noise=%.2f' % X_noise]*len(args.K_list)
col2 = args.K_list
columns = np.array([col1, col2])
indices = [(me,mo) for me in metrics for mo in models]
for args.simulation_type in args.all_simulation_types:
print(args.simulation_type)
curr_table = pd.DataFrame(index = pd.MultiIndex.from_tuples(indices, names=['Metrics', 'Model']),
columns = | pd.MultiIndex.from_arrays(columns) | pandas.MultiIndex.from_arrays |
import datetime
import integrationutils as ius
import numpy as np
import os
import pathlib
import pandas as pd
import sqlite3
import sys
import warnings
''' Direct questions and concerns regarding this script to <NAME>
<EMAIL>
'''
def find_abund_col(df):
clist = []
for c in df.columns:
if 'bundances (per File)' in c:
clist.append(c)
if len(clist) == 1:
colname = clist[0]
print('Found abundance column', colname)
elif len(clist) == 0:
colname = None
print('Could not find abundance column in the Feature table')
elif len(clist) > 1:
colname = None
print('Found more than one potential abundance column in the Feature table')
return colname
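# Hedged usage sketch added for illustration (not part of the original script); the column names are made up.
# find_abund_col returns the single column whose name contains "bundances (per File)"
# (matching both "Abundances" and "abundances"), or None otherwise.
def _example_find_abund_col():
    toy = pd.DataFrame(columns=['Charge', 'Abundances (per File): F1'])
    return find_abund_col(toy)  # -> 'Abundances (per File): F1'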
def fname_to_instrument(infname):
'''Decides on the instrument name based on the file name'''
infname = infname.lower()
instrument_name = None
if ('lumos_' in infname) and ('faims' in infname):
instrument_name = 'Lumos_FAIMS'
elif ('lumos_' in infname) and ('faims' not in infname):
instrument_name = 'Lumos'
elif ('fusion_' in infname) and ('faims' in infname):
instrument_name = 'Fusion_FAIMS'
elif ('fusion_' in infname) and ('faims' not in infname):
instrument_name = 'Fusion'
elif ('qehf_' in infname):
instrument_name = 'QEHF'
elif ('qe_' in infname):
instrument_name = 'QE'
elif ('elite_' in infname):
instrument_name = 'Elite'
return instrument_name
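# Hedged usage sketch added for illustration (not part of the original script); the file names are hypothetical.
def _example_fname_to_instrument():
    print(fname_to_instrument('20210101_Lumos_FAIMS_QC.raw'))  # -> 'Lumos_FAIMS'
    print(fname_to_instrument('20210101_QEHF_QC.raw'))         # -> 'QEHF'
    print(fname_to_instrument('20210101_unknown.raw'))         # -> None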
def get_console_arg():
try:
assert (len(sys.argv) == 2), "Script requires only one parameter"
print('Input file',sys.argv[1])
injs = sys.argv[1]
print('Current work dir', os.getcwd())
return injs
except:
print('Could not pass json file to the script')
return None
def parse_cons_features(in_dfs):
""" If the Consensus Features table is present, returns the tuple with peak properties
(for features with charge 2 or higher):
    (mean peak width in s, st dev of peak width in s, numpy array of peak widths in s,
    summed feature intensity, numpy array of feature intensities)
    If the Consensus Features table is absent or cannot be processed, returns
    (None, None, numpy array of zeros, None, None)
"""
if 'Consensus Features' in in_dfs.keys():
try:
df = in_dfs['Consensus Features']
            df1 = df[df['Charge'] > 1]  # keep features with charge 2 or higher, as the docstring states
diff_array = np.subtract(np.array(df1['Right RT [min]']),np.array(df1['Left RT [min]']))
diff_array = 60*diff_array
try:
for c in df1.columns:
if 'Abundances (per File)' in c:
abundcol = c
int_array = np.array(df1[abundcol])
except:
                warnings.warn('Could not find Consensus Feature intensities', UserWarning)
int_array = None
if int_array is None:
sum_abund = None
else:
sum_abund = np.sum(int_array)
print('Sum of all intensities is',sum_abund)
return (round(np.nanmean(diff_array),1),
round(np.nanstd(diff_array),1),
diff_array,
sum_abund, int_array)
except:
warnings.warn('Could not process Consensus Features table', UserWarning)
return (None, None, np.zeros(100,dtype=int), None, None)
else:
print('Could not find the Consensus Features table')
return (None, None, np.zeros(100,dtype=int), None, None)
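# Hedged usage sketch added for illustration (not part of the original script); the values below are made up.
# Peak widths are (Right RT - Left RT) * 60 seconds, computed over charge >= 2 features only.
def _example_parse_cons_features():
    toy = {'Consensus Features': pd.DataFrame({
        'Charge': [2, 3],
        'Left RT [min]': [10.0, 20.0],
        'Right RT [min]': [10.5, 20.3],
        'Abundances (per File): F1': [1e6, 2e6]})}
    # -> (mean width in s, std of widths in s, widths array, summed abundance, abundance array)
    return parse_cons_features(toy)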
def parse_table_input_file(in_dfs):
"""
Returns a tuple
(instrument name list, creation date list, instr from name list, instr from metadata list)
"""
try:
# Select the Input Files df from the dictionary
df = in_dfs['Input Files']
# Select the rows with .RAW files (df in principle can contain many .RAW files, .MSF etc)
df1 = df[df['File Name'].str.contains('.raw')]
# Create a list of base filenames for .RAW files
shortnames = []
for longname in df1['File Name']:
shortnames.append(pathlib.Path(longname).name)
filedates = list(df1['Creation Date'])
instr_from_metadata = list(df1['Instrument Name'])
instr_from_fnames = []
for n in shortnames:
instr_from_fnames.append(fname_to_instrument(n))
return (shortnames,filedates,instr_from_fnames,instr_from_metadata)
except:
warnings.warn('Could not process Input Files table', UserWarning)
return None
def print_all_rows(conn, table):
cur = conn.cursor()
sql_command = "SELECT * FROM " + table
cur.execute(sql_command)
rows = cur.fetchall()
for row in rows:
print(row)
cur.close()
return True
def return_last_rows(conn, table, index_col, num, colnames):
cur = conn.cursor()
sql_command = ('''SELECT * FROM (
SELECT * FROM ''' + table + " ORDER BY " +
index_col + ''' DESC LIMIT ''' + str(num) + ''')
ORDER BY ''' + index_col + " ASC;")
cur.execute(sql_command)
rows = cur.fetchall()
list_of_tuples = []
for row in rows:
list_of_tuples.append(row)
cur.close()
df = pd.DataFrame(list_of_tuples, columns=colnames)
print('Fetched', num, 'latest results from the QC database')
return df
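# Hedged usage sketch added for illustration (not part of the original script); the table and column
# names are hypothetical. The query assembled above selects the `num` most recent rows by `index_col`
# and returns them oldest-first as a DataFrame.
def _example_return_last_rows():
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE qc (search_id INTEGER, psm INTEGER)')
    conn.executemany('INSERT INTO qc VALUES (?, ?)', [(i, 100 + i) for i in range(5)])
    return return_last_rows(conn, 'qc', 'search_id', 3, ['search_id', 'psm'])  # search_ids 2, 3, 4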
def return_latest_psm_is(df, id_col, file_col, instr_col, psm_col):
''' Extracts info on PSM number, search ID and Instrument from the last row in DB
'''
last_row = df.iloc[-1]
search_id = last_row[id_col]
instr = last_row[instr_col]
psm = last_row[psm_col]
psm_string = str(psm) + ' PSMs in file ' + str(last_row[file_col])
print('String to put on the graph', psm_string)
return (search_id, instr, psm, psm_string)
def return_peptide_number(in_dfs):
''' Returns the number of peptides based on the Peptide table
'''
try:
# Select the Peptide Groups df from the dictionary
df = in_dfs['Peptide Groups']
return (len(df.index))
except:
warnings.warn('Could not process Peptide Groups table', UserWarning)
return None
def return_protein_number(in_dfs):
"""Returns the number of Master proteins based on the Proteins table"""
try:
# Select the Proteins df from the dictionary
df = in_dfs['Proteins']
# Select the Master proteins
df1 = df[df['Master'] == 'IsMasterProtein']
return (len(df1.index))
except:
warnings.warn('Could not process Proteins table', UserWarning)
return None
def testing_load_example_files():
df_dict = {}
df_dict['Proteins'] = pd.read_csv('TargetProtein.txt',delimiter='\t')
df_dict['Peptide Groups'] = pd.read_csv('TargetPeptideGroup.txt',delimiter='\t')
df_dict['PSMs'] = pd.read_csv('TargetPeptideSpectrumMatch.txt',delimiter='\t')
df_dict['MS/MS Spectrum Info'] = pd.read_csv('MSnSpectrumInfo.txt',delimiter='\t')
df_dict['Input Files'] = | pd.read_csv('WorkflowInputFile.txt',delimiter='\t') | pandas.read_csv |
import pandas as pd
import numpy as np
import sys
from tabulate import tabulate
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import matplotlib.ticker as ticker
# from pyutils import *
# import dtale
# dtale.show(df)
# from pandas_profiling import ProfileReport
# profile = ProfileReport(tr, explorative=True)
def peek(df, rows=3):
    concat1 = pd.concat([df.dtypes, df.iloc[:rows, :].T], axis=1).reset_index()
concat1.columns = [''] * len(concat1.columns)
return concat1
def dtypes(df):
return pd.concat([df.nunique(), df.dtypes], keys=["count_unique", "dtype"], axis=1)
def bin(df,column, bins = 5):
    return pd.cut(df[column], bins=bins, right=True).value_counts(normalize=True).sort_index() * 100
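# Hedged usage sketch added for illustration (not part of the original module); 'value' is a made-up column.
def _example_bin():
    toy = pd.DataFrame({'value': range(100)})
    return bin(toy, 'value', bins=5)  # five equal-width buckets, 20% of rows in each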
def describe_df(df,floatfmt= '.3f'):
# Numerical
print("--" * 20)
print('Columns:', df.shape[1])
print('Rows:', df.shape[0])
print("Memory usage:", (f"({(sys.getsizeof(df) / 1024 ** 2):.2f} Mb)"))
print("--" * 20)
print('NUMERICAL VARIABLES:')
numerical = df.select_dtypes(include=np.number)
concatenated_numerical = pd.concat([
round(numerical.isnull().sum() / df.shape[0] * 100, 2).astype(str) + "%",
numerical.isnull().sum(),
numerical.count(),
numerical.min(),
numerical.mean(),
numerical.max()
], axis=1, keys=["%NULLS", "COUNT_NULLS", "NOT_NULL", 'MIN', 'MEAN', 'MAX'], sort=False).sort_values('COUNT_NULLS',
ascending=False).reset_index().rename(
columns={'index': ''})
t = numerical.mode().T
t.rename(columns={0: 'MODE'}, inplace=True)
concatenated_numerical = concatenated_numerical.merge(t, how='left', left_on='', right_on=t.index)
concatenated_numerical.index = concatenated_numerical.index + 1
concatenated_numerical = concatenated_numerical.iloc[:, [0, 4, 5, 6, 7, 1, 2, 3]]
print(tabulate(concatenated_numerical, headers=
[
'MIN',
'MEAN',
'MAX',
'MODE',
"%NULLS",
"#_NULLS",
"NOT_NULL",
], tablefmt="presto", colalign=("right"), floatfmt=floatfmt))
# Categorical
print('-----' * 20)
print()
print('CATEGORICAL VARIABLES:')
categorical = df.select_dtypes(['object','category'])
if categorical.shape[1] == 0:
print("No Categorical Variables")
else:
concatenated_categorical = pd.concat([
round(categorical.isnull().sum() / df.shape[0] * 100, 2).astype(str) + "%",
categorical.isnull().sum(),
categorical.count()
],
            keys=["%NULLS",
"COUNT_NULLS",
"NOT_NULL"], axis=1, sort=False).sort_values('%NULLS', ascending=False).reset_index().rename(
columns={'index': ''})
max_unique = 5
u_strs = []
for col in categorical:
series = categorical.loc[categorical[col].notnull(), col]
n_unique = series.nunique()
if n_unique > max_unique:
u_strs.append(str(n_unique) + ' unique values')
else:
u_strs.append(str(series.unique()))
t = pd.DataFrame(u_strs, categorical.columns)
t = t.reset_index()
t = t.rename(columns={'index': '', 0: 'Unique_Values'})
concatenated_categorical = concatenated_categorical.merge(t, on='')
concatenated_categorical.index = concatenated_categorical.index + 1
print(tabulate(concatenated_categorical, headers=
[
"%NULLS",
"#_NULLS",
"NOT_NULL",
"Unique_Values"
], tablefmt="presto", colalign=("left")))
def find_pretty_grid(n_plots, max_cols=5):
"""Determine a good grid shape for subplots.
Tries to find a way to arange n_plots many subplots on a grid in a way
that fills as many grid-cells as possible, while keeping the number
of rows low and the number of columns below max_cols.
Parameters
----------
n_plots : int
Number of plots to arrange.
max_cols : int, default=5
Maximum number of columns.
Returns
-------
n_rows : int
Number of rows in grid.
n_cols : int
Number of columns in grid.
Examples
--------
>>> find_pretty_grid(16, 5)
(4, 4)
>>> find_pretty_grid(11, 5)
(3, 4)
>>> find_pretty_grid(10, 5)
(2, 5)
"""
# we could probably do something with prime numbers here
# but looks like that becomes a combinatorial problem again?
if n_plots % max_cols == 0:
# perfect fit!
# if max_cols is 6 do we prefer 6x1 over 3x2?
return int(n_plots / max_cols), max_cols
# min number of rows needed
min_rows = int(np.ceil(n_plots / max_cols))
best_empty = max_cols
best_cols = max_cols
for cols in range(max_cols, min_rows - 1, -1):
# we only allow getting narrower if we have more cols than rows
remainder = (n_plots % cols)
empty = cols - remainder if remainder != 0 else 0
if empty == 0:
return int(n_plots / cols), cols
if empty < best_empty:
best_empty = empty
best_cols = cols
return int(np.ceil(n_plots / best_cols)), best_cols
def make_subplots(n_plots, max_cols=5, row_height=3):
"""Create a harmonious subplot grid.
"""
n_rows, n_cols = find_pretty_grid(n_plots, max_cols=max_cols)
fig, axes = plt.subplots(n_rows, n_cols,
figsize=(4 * n_cols, row_height * n_rows),
constrained_layout=True)
# we don't want ravel to fail, this is awkward!
axes = np.atleast_2d(axes)
return fig, axes
def class_hists(data, column, target, bins="auto", ax=None, legend=False,
scale_separately=True):
"""Grouped univariate histograms.
Parameters
----------
data : pandas DataFrame
Input data to plot
column : column specifier
Column in the data to compute histograms over (must be continuous).
target : column specifier
Target column in data, must be categorical.
bins : string, int or array-like
Number of bins, 'auto' or bin edges. Passed to np.histogram_bin_edges.
We always show at least 5 bins for now.
ax : matplotlib axes
Axes to plot into
legend : boolean, default=False
Whether to create a legend.
scale_separately : boolean, default=True
Whether to scale each class separately.
Examples
--------
>>> from dabl.datasets import load_adult
>>> data = load_adult()
>>> class_hists(data, "age", "gender", legend=True)
<matplotlib...
"""
col_data = data[column].dropna()
if ax is None:
ax = plt.gca()
if col_data.nunique() > 10:
ordinal = False
# histograms
bin_edges = np.histogram_bin_edges(col_data, bins=bins)
if len(bin_edges) > 30:
bin_edges = np.histogram_bin_edges(col_data, bins=30)
counts = {}
for name, group in data.groupby(target)[column]:
this_counts, _ = np.histogram(group, bins=bin_edges)
counts[name] = this_counts
counts = pd.DataFrame(counts)
else:
ordinal = True
# ordinal data, count distinct values
counts = data.groupby(target)[column].value_counts().unstack(target)
if scale_separately:
# normalize by maximum
counts = counts / counts.max()
bottom = counts.max().max() * 1.1
for i, name in enumerate(counts.columns):
if ordinal:
ax.bar(range(counts.shape[0]), counts[name], width=.9,
bottom=bottom * i, tick_label=counts.index, linewidth=2,
edgecolor='k')
xmin, xmax = 0 - .5, counts.shape[0] - .5
else:
ax.bar(bin_edges[:-1], counts[name], bottom=bottom * i, label=name,
align='edge', width=(bin_edges[1] - bin_edges[0]) * .9)
xmin, xmax = bin_edges[0], bin_edges[-1]
ax.hlines(bottom * i, xmin=xmin, xmax=xmax,
linewidth=1)
if legend:
ax.legend()
ax.set_yticks(())
ax.set_xlabel(column)
return ax
def plot_univariate_classification(df, target_name):
df[[target_name]] = df[[target_name]].astype('object')
continuous_cols = list(df.select_dtypes("number").columns)
fig, axes = make_subplots(n_plots=len(continuous_cols), row_height=2)
for i, (ind, ax) in enumerate(zip(continuous_cols, axes.ravel())):
class_hists(df, continuous_cols[i],
target_name, ax=ax, legend=i == 0)
for j in range(i + 1, axes.size):
# turn off axis if we didn't fill last row
axes.ravel()[j].set_axis_off()
return plt.show()
def violin_plot_classification(df, target_name):
continuous_cols = list(df.select_dtypes("number").columns)
data = pd.DataFrame(StandardScaler().fit_transform(df[continuous_cols]), columns=df[continuous_cols].columns,
index=df[continuous_cols].index)
data = pd.concat([data, df[[target_name]]], axis=1)
data = pd.melt(data, id_vars=target_name,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
ax = sns.violinplot(x="features", y="value", hue=target_name, data=data, split=True, inner="quartile")
for i in range(len(np.unique(data["features"])) - 1):
ax.axvline(i + 0.5, color='grey', lw=1)
plt.xticks(rotation=20)
return plt.show()
def box_plot_classification(df, target_name):
df[target_name] = df[target_name].astype('object')
continuous_cols = list(df.select_dtypes("number").columns)
data = pd.DataFrame(StandardScaler().fit_transform(df[continuous_cols]), columns=df[continuous_cols].columns,
index=df[continuous_cols].index)
data = pd.concat([data, df[[target_name]]], axis=1)
data = pd.melt(data, id_vars=target_name,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
ax = sns.boxplot(x="features", y="value", hue=target_name, data=data)
for i in range(len(np.unique(data["features"])) - 1):
ax.axvline(i + 0.5, color='grey', lw=1)
plt.xticks(rotation=20)
return plt.show()
def plot_density_numerical_for_whole_dataframe(df):
continuous_cols = list(df.select_dtypes("number").columns)
fig, axes = make_subplots(n_plots=len(continuous_cols), row_height=2)
for i, (ind, ax) in enumerate(zip(continuous_cols, axes.ravel())):
sns.kdeplot(df[continuous_cols[i]], color='black', shade=True, legend=True, ax=ax)
for j in range(i + 1, axes.size):
# turn off axis if we didn't fill last row
axes.ravel()[j].set_axis_off()
return plt.show()
# def plot_histograms_categoricals_for_whole_dataframe(df):
# categorical_cols = list(df.select_dtypes("object").columns)
# fig, axes = make_subplots(n_plots=len(categorical_cols), row_height=2)
# for i, (ind, ax) in enumerate(zip(categorical_cols, axes.ravel())):
# ax = sns.histplot(x=df[categorical_cols[i]], data=df, color='green', ax=ax)
# ax = ax.set_xticklabels(labels=df[categorical_cols[i]].value_counts().index.values, rotation=90)
#
# for j in range(i + 1, axes.size):
# axes.ravel()[j].set_axis_off()
# # plt.xticks(rotate=90)
# plt.show()
def plot_single_numerical(df):
plt.figure()
sns.kdeplot(df, color="black", shade="gray")
return plt.show()
def ordered_barplot(df, variable_name, rotate=0):
s = df[~pd.isnull(df[[variable_name]])][variable_name]
chart = pd.value_counts(s).to_frame(name='data')
chart.index.name = 'labels'
chart = chart.reset_index().sort_values(['data', 'labels'], ascending=[False, True])
plt.figure(figsize=(12, 8))
ax = sns.barplot(x="labels", y="data", data=chart)
ncount = len(df[[variable_name]])
# plt.title('Distribution of Truck Configurations')
# ax.set_xticklabels(labels=tr.state, rotation=90)
plt.xlabel(variable_name)
plt.xticks(rotation=rotate)
# Make twin axis
ax2 = ax.twinx()
# Switch so count axis is on right, frequency on left
ax2.yaxis.tick_left()
ax.yaxis.tick_right()
# Also switch the labels over
ax.yaxis.set_label_position('right')
ax2.yaxis.set_label_position('left')
ax2.set_ylabel('Frequency [%]')
for p in ax.patches:
x = p.get_bbox().get_points()[:, 0]
y = p.get_bbox().get_points()[1, 1]
ax.annotate('{:.1f}%'.format(100. * y / ncount), (x.mean(), y), ha='center',
va='bottom') # set the alignment of the text
# Use a LinearLocator to ensure the correct number of ticks
ax.yaxis.set_major_locator(ticker.LinearLocator(11))
# Fix the frequency range to 0-100
ax2.set_ylim(0, 100)
ax.set_ylim(0, ncount)
# And use a MultipleLocator to ensure a tick spacing of 10
ax2.yaxis.set_major_locator(ticker.MultipleLocator(10))
# Need to turn the grid on ax2 off, otherwise the gridlines end up on top of the bars
ax2.grid(None)
ax2.grid(False)
return plt.show()
def ordered_barplot_h(df, variable_name, rotate=0):
s = df[~pd.isnull(df[[variable_name]])][variable_name]
chart = | pd.value_counts(s) | pandas.value_counts |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/8/27 15:59
Desc: REITs quotes and information
http://quote.eastmoney.com/center/gridlist.html#fund_reits_all
https://www.jisilu.cn/data/cnreits/#CnReits
"""
import pandas as pd
import requests
def reits_realtime_em() -> pd.DataFrame:
"""
    Eastmoney - Quote Center - REITs - Shanghai/Shenzhen listed REITs
http://quote.eastmoney.com/center/gridlist.html#fund_reits_all
    :return: real-time quotes for Shanghai/Shenzhen listed REITs
:rtype: pandas.DataFrame
"""
url = "http://95.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "20",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:9 e:97,m:0 t:10 e:97",
"fields": "f2,f3,f4,f5,f6,f12,f14,f15,f16,f17,f18",
"_": "1630048369992",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(
{
"index": "序号",
"f2": "最新价",
"f3": "涨跌幅",
"f4": "涨跌额",
"f5": "成交量",
"f6": "成交额",
"f12": "代码",
"f14": "名称",
"f15": "最高价",
"f16": "最低价",
"f17": "开盘价",
"f18": "昨收",
},
axis=1,
inplace=True,
)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"成交量",
"成交额",
"开盘价",
"最高价",
"最低价",
"昨收",
]
]
return temp_df
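# Hedged usage sketch added for illustration (not part of the original module); it simply calls the
# function above and therefore needs network access to the Eastmoney endpoint.
def _example_reits_realtime_em():
    df = reits_realtime_em()
    print(df.head())  # one row per listed REIT with price, change and volume columns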
def reits_info_jsl() -> pd.DataFrame:
"""
    Jisilu - Real-time Data - REITs - A-share REITs
https://www.jisilu.cn/data/cnreits/#CnReits
    :return: A-share REITs overview
:rtype: pandas.DataFrame
"""
url = "https://www.jisilu.cn/data/cnreits/list/"
params = {"___jsl": "LST___t=1630052485199"}
payload = {"rp": "50", "page": "1"}
r = requests.get(url, params=params, json=payload)
data_json = r.json()
temp_df = pd.DataFrame([item["cell"] for item in data_json["rows"]])
temp_df.rename(
{
"fund_id": "代码",
"fund_nm": "简称",
"full_nm": "全称",
"project_type": "项目类型",
"price": "现价",
"increase_rt": "涨幅",
"volume": "成交额",
"nav": "净值",
"nav_dt": "净值日期",
"discount_rt": "折价率",
"maturity_dt": "到期日",
"fund_company": "基金公司",
"urls": "链接地址",
"last_dt": "更新日期",
"last_time": "更新时间",
"unit_total": "规模",
"left_year": "剩余年限",
},
axis=1,
inplace=True,
)
temp_df = temp_df[
[
"代码",
"简称",
"现价",
"涨幅",
"成交额",
"净值",
"净值日期",
"折价率",
"规模",
"到期日",
"剩余年限",
"全称",
"项目类型",
"基金公司",
]
]
temp_df['现价'] = pd.to_numeric(temp_df['现价'])
temp_df['涨幅'] = pd.t | o_numeric(temp_df['涨幅']) | pandas.to_numeric |
# LIBRARIES
# set up backend for ssh -x11 figures
import matplotlib
matplotlib.use('Agg')
# read and write
import os
import sys
import glob
import re
import fnmatch
import csv
import shutil
from datetime import datetime
# maths
import numpy as np
import pandas as pd
import math
import random
# miscellaneous
import warnings
import gc
import timeit
# sklearn
from sklearn.utils import resample
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, log_loss, roc_auc_score, \
accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, average_precision_score
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import KFold, PredefinedSplit, cross_validate
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Statistics
from scipy.stats import pearsonr, ttest_rel, norm
# Other tools for ensemble models building (<NAME>'s InnerCV class)
from hyperopt import fmin, tpe, space_eval, Trials, hp, STATUS_OK
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# CPUs
from multiprocessing import Pool
# GPUs
from GPUtil import GPUtil
# tensorflow
import tensorflow as tf
# keras
from keras_preprocessing.image import ImageDataGenerator, Iterator
from keras_preprocessing.image.utils import load_img, img_to_array, array_to_img
from tensorflow.keras.utils import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError, AUC, BinaryAccuracy, Precision, Recall, \
TruePositives, FalsePositives, FalseNegatives, TrueNegatives
from tensorflow_addons.metrics import RSquare, F1Score
# Plots
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from bioinfokit import visuz
# Model's attention
from keract import get_activations, get_gradients_of_activations
from scipy.ndimage.interpolation import zoom
# Survival
from lifelines.utils import concordance_index
# Necessary to define MyCSVLogger
import collections
import csv
import io
import six
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
from tensorflow.keras.backend import eval
# Set display parameters
pd.set_option('display.max_rows', 200)
# CLASSES
class Basics:
"""
Root class herited by most other class. Includes handy helper functions
"""
def __init__(self):
# seeds for reproducibility
self.seed = 0
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# other parameters
self.path_data = '../data/'
self.folds = ['train', 'val', 'test']
self.n_CV_outer_folds = 10
self.outer_folds = [str(x) for x in list(range(self.n_CV_outer_folds))]
self.modes = ['', '_sd', '_str']
self.id_vars = ['id', 'eid', 'instance', 'outer_fold']
self.instances = ['0', '1', '1.5', '1.51', '1.52', '1.53', '1.54', '2', '3']
self.ethnicities_vars_forgot_Other = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other_ethnicity',
'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.ethnicities_vars = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.demographic_vars = ['Age', 'Sex'] + self.ethnicities_vars
self.names_model_parameters = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers',
'n_fc_nodes', 'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate',
'data_augmentation_factor']
self.targets_regression = ['Age']
self.targets_binary = ['Sex']
self.models_types = ['', '_bestmodels']
self.dict_prediction_types = {'Age': 'regression', 'Sex': 'binary'}
self.dict_side_predictors = {'Age': ['Sex'] + self.ethnicities_vars_forgot_Other,
'Sex': ['Age'] + self.ethnicities_vars_forgot_Other}
self.organs = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal']
self.left_right_organs_views = ['Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees']
self.dict_organs_to_views = {'Brain': ['MRI'],
'Eyes': ['Fundus', 'OCT'],
'Arterial': ['Carotids'],
'Heart': ['MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody'],
'PhysicalActivity': ['FullWeek']}
self.dict_organsviews_to_transformations = \
{'Brain_MRI': ['SagittalRaw', 'SagittalReference', 'CoronalRaw', 'CoronalReference', 'TransverseRaw',
'TransverseReference'],
'Arterial_Carotids': ['Mixed', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis'],
'Heart_MRI': ['2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast', '4chambersRaw',
'4chambersContrast'],
'Musculoskeletal_Spine': ['Sagittal', 'Coronal'],
'Musculoskeletal_FullBody': ['Mixed', 'Figure', 'Skeleton', 'Flesh'],
'PhysicalActivity_FullWeek': ['GramianAngularField1minDifference', 'GramianAngularField1minSummation',
'MarkovTransitionField1min', 'RecurrencePlots1min']}
self.dict_organsviews_to_transformations.update(dict.fromkeys(['Eyes_Fundus', 'Eyes_OCT'], ['Raw']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Abdomen_Liver', 'Abdomen_Pancreas'], ['Raw', 'Contrast']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], ['MRI']))
self.organsviews_not_to_augment = []
self.organs_instances23 = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity']
self.organs_XWAS = \
['*', '*instances01', '*instances1.5x', '*instances23', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes',
'EyesFundus', 'EyesOCT', 'Hearing', 'Lungs', 'Arterial', 'ArterialPulseWaveAnalysis', 'ArterialCarotids',
'Heart', 'HeartECG', 'HeartMRI', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Musculoskeletal',
'MusculoskeletalSpine', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars', 'PhysicalActivity', 'Biochemistry', 'BiochemistryUrine', 'BiochemistryBlood',
'ImmuneSystem']
# Others
if '/Users/Alan/' in os.getcwd():
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/scripts/')
else:
os.chdir('/n/groups/patel/Alan/Aging/Medical_Images/scripts/')
gc.enable() # garbage collector
warnings.filterwarnings('ignore')
def _version_to_parameters(self, model_name):
parameters = {}
parameters_list = model_name.split('_')
for i, parameter in enumerate(self.names_model_parameters):
parameters[parameter] = parameters_list[i]
if len(parameters_list) > 11:
parameters['outer_fold'] = parameters_list[11]
return parameters
@staticmethod
def _parameters_to_version(parameters):
return '_'.join(parameters.values())
@staticmethod
def convert_string_to_boolean(string):
if string == 'True':
boolean = True
elif string == 'False':
boolean = False
else:
print('ERROR: string must be either \'True\' or \'False\'')
sys.exit(1)
return boolean
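# Hedged illustration added by the editor; the version string below is made up but follows the format
# implied by names_model_parameters: twelve values joined by underscores, so parsing is a plain split,
# as _version_to_parameters does above.
def _example_parse_version():
    names = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers', 'n_fc_nodes',
             'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor']
    version = 'Age_Heart_MRI_4chambersRaw_InceptionResNetV2_1_1024_Adam_0.0001_0.1_0.5_1.0'
    return dict(zip(names, version.split('_')))  # e.g. {'target': 'Age', 'organ': 'Heart', ...}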
class Metrics(Basics):
"""
Helper class defining dictionaries of metrics and custom metrics
"""
def __init__(self):
# Parameters
Basics.__init__(self)
self.metrics_displayed_in_int = ['True-Positives', 'True-Negatives', 'False-Positives', 'False-Negatives']
self.metrics_needing_classpred = ['F1-Score', 'Binary-Accuracy', 'Precision', 'Recall']
self.dict_metrics_names_K = {'regression': ['RMSE'], # For now, R-Square is buggy. Try again in a few months.
'binary': ['ROC-AUC', 'PR-AUC', 'F1-Score', 'Binary-Accuracy', 'Precision',
'Recall', 'True-Positives', 'False-Positives', 'False-Negatives',
'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_metrics_names = {'regression': ['RMSE', 'MAE', 'R-Squared', 'Pearson-Correlation'],
'binary': ['ROC-AUC', 'F1-Score', 'PR-AUC', 'Binary-Accuracy', 'Sensitivity',
'Specificity', 'Precision', 'Recall', 'True-Positives', 'False-Positives',
'False-Negatives', 'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_losses_names = {'regression': 'MSE', 'binary': 'Binary-Crossentropy',
'multiclass': 'categorical_crossentropy'}
self.dict_main_metrics_names_K = {'Age': 'MAE', 'Sex': 'PR-AUC', 'imbalanced_binary_placeholder': 'PR-AUC'}
self.dict_main_metrics_names = {'Age': 'R-Squared', 'Sex': 'ROC-AUC',
'imbalanced_binary_placeholder': 'PR-AUC'}
self.main_metrics_modes = {'loss': 'min', 'R-Squared': 'max', 'Pearson-Correlation': 'max', 'RMSE': 'min',
'MAE': 'min', 'ROC-AUC': 'max', 'PR-AUC': 'max', 'F1-Score': 'max', 'C-Index': 'max',
'C-Index-difference': 'max'}
self.n_bootstrap_iterations = 1000
def rmse(y_true, y_pred):
return math.sqrt(mean_squared_error(y_true, y_pred))
def sensitivity_score(y, pred):
_, _, fn, tp = confusion_matrix(y, pred.round()).ravel()
return tp / (tp + fn)
def specificity_score(y, pred):
tn, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn / (tn + fp)
def true_positives_score(y, pred):
_, _, _, tp = confusion_matrix(y, pred.round()).ravel()
return tp
def false_positives_score(y, pred):
_, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return fp
def false_negatives_score(y, pred):
_, _, fn, _ = confusion_matrix(y, pred.round()).ravel()
return fn
def true_negatives_score(y, pred):
tn, _, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn
self.dict_metrics_sklearn = {'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'RMSE': rmse,
'Pearson-Correlation': pearsonr,
'R-Squared': r2_score,
'Binary-Crossentropy': log_loss,
'ROC-AUC': roc_auc_score,
'F1-Score': f1_score,
'PR-AUC': average_precision_score,
'Binary-Accuracy': accuracy_score,
'Sensitivity': sensitivity_score,
'Specificity': specificity_score,
'Precision': precision_score,
'Recall': recall_score,
'True-Positives': true_positives_score,
'False-Positives': false_positives_score,
'False-Negatives': false_negatives_score,
'True-Negatives': true_negatives_score}
def _bootstrap(self, data, function):
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
results.append(function(data_i['y'], data_i['pred']))
return np.mean(results), np.std(results)
class PreprocessingMain(Basics):
"""
This class executes the code for step 01. It preprocesses the main dataframe by:
- reformating the rows and columns
- splitting the dataset into folds for the future cross validations
- imputing key missing data
- adding a new UKB instance for physical activity data
- formating the demographics columns (age, sex and ethnicity)
- reformating the dataframe so that different instances of the same participant are treated as different rows
- saving the dataframe
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
def _add_outer_folds(self):
outer_folds_split = pd.read_csv(self.path_data + 'All_eids.csv')
outer_folds_split.rename(columns={'fold': 'outer_fold'}, inplace=True)
outer_folds_split['eid'] = outer_folds_split['eid'].astype('str')
outer_folds_split['outer_fold'] = outer_folds_split['outer_fold'].astype('str')
outer_folds_split.set_index('eid', inplace=True)
self.data_raw = self.data_raw.join(outer_folds_split)
def _impute_missing_ecg_instances(self):
data_ecgs = pd.read_csv('/n/groups/patel/Alan/Aging/TimeSeries/scripts/age_analysis/missing_samples.csv')
data_ecgs['eid'] = data_ecgs['eid'].astype(str)
data_ecgs['instance'] = data_ecgs['instance'].astype(str)
for _, row in data_ecgs.iterrows():
self.data_raw.loc[row['eid'], 'Date_attended_center_' + row['instance']] = row['observation_date']
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_sex(self):
# Use genetic sex when available
self.data_raw['Sex_genetic'][self.data_raw['Sex_genetic'].isna()] = \
self.data_raw['Sex'][self.data_raw['Sex_genetic'].isna()]
self.data_raw.drop(['Sex'], axis=1, inplace=True)
self.data_raw.rename(columns={'Sex_genetic': 'Sex'}, inplace=True)
self.data_raw.dropna(subset=['Sex'], inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = \
self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _encode_ethnicity(self):
# Fill NAs for ethnicity on instance 0 if available in other instances
eids_missing_ethnicity = self.data_raw['eid'][self.data_raw['Ethnicity'].isna()]
for eid in eids_missing_ethnicity:
sample = self.data_raw.loc[eid, :]
if not math.isnan(sample['Ethnicity_1']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_1']
elif not math.isnan(sample['Ethnicity_2']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_2']
self.data_raw.drop(['Ethnicity_1', 'Ethnicity_2'], axis=1, inplace=True)
# One hot encode ethnicity
dict_ethnicity_codes = {'1': 'Ethnicity.White', '1001': 'Ethnicity.British', '1002': 'Ethnicity.Irish',
'1003': 'Ethnicity.White_Other',
'2': 'Ethnicity.Mixed', '2001': 'Ethnicity.White_and_Black_Caribbean',
'2002': 'Ethnicity.White_and_Black_African',
'2003': 'Ethnicity.White_and_Asian', '2004': 'Ethnicity.Mixed_Other',
'3': 'Ethnicity.Asian', '3001': 'Ethnicity.Indian', '3002': 'Ethnicity.Pakistani',
'3003': 'Ethnicity.Bangladeshi', '3004': 'Ethnicity.Asian_Other',
'4': 'Ethnicity.Black', '4001': 'Ethnicity.Caribbean', '4002': 'Ethnicity.African',
'4003': 'Ethnicity.Black_Other',
'5': 'Ethnicity.Chinese',
'6': 'Ethnicity.Other_ethnicity',
'-1': 'Ethnicity.Do_not_know',
'-3': 'Ethnicity.Prefer_not_to_answer',
'-5': 'Ethnicity.NA'}
self.data_raw['Ethnicity'] = self.data_raw['Ethnicity'].fillna(-5).astype(int).astype(str)
ethnicities = pd.get_dummies(self.data_raw['Ethnicity'])
self.data_raw.drop(['Ethnicity'], axis=1, inplace=True)
ethnicities.rename(columns=dict_ethnicity_codes, inplace=True)
ethnicities['Ethnicity.White'] = ethnicities['Ethnicity.White'] + ethnicities['Ethnicity.British'] + \
ethnicities['Ethnicity.Irish'] + ethnicities['Ethnicity.White_Other']
ethnicities['Ethnicity.Mixed'] = ethnicities['Ethnicity.Mixed'] + \
ethnicities['Ethnicity.White_and_Black_Caribbean'] + \
ethnicities['Ethnicity.White_and_Black_African'] + \
ethnicities['Ethnicity.White_and_Asian'] + \
ethnicities['Ethnicity.Mixed_Other']
ethnicities['Ethnicity.Asian'] = ethnicities['Ethnicity.Asian'] + ethnicities['Ethnicity.Indian'] + \
ethnicities['Ethnicity.Pakistani'] + ethnicities['Ethnicity.Bangladeshi'] + \
ethnicities['Ethnicity.Asian_Other']
ethnicities['Ethnicity.Black'] = ethnicities['Ethnicity.Black'] + ethnicities['Ethnicity.Caribbean'] + \
ethnicities['Ethnicity.African'] + ethnicities['Ethnicity.Black_Other']
ethnicities['Ethnicity.Other'] = ethnicities['Ethnicity.Other_ethnicity'] + \
ethnicities['Ethnicity.Do_not_know'] + \
ethnicities['Ethnicity.Prefer_not_to_answer'] + \
ethnicities['Ethnicity.NA']
self.data_raw = self.data_raw.join(ethnicities)
def generate_data(self):
# Preprocessing
dict_UKB_fields_to_names = {'34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3',
'31-0.0': 'Sex', '22001-0.0': 'Sex_genetic', '21000-0.0': 'Ethnicity',
'21000-1.0': 'Ethnicity_1', '21000-2.0': 'Ethnicity_2',
'22414-2.0': 'Abdominal_images_quality'}
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv',
usecols=['eid', '31-0.0', '22001-0.0', '21000-0.0', '21000-1.0', '21000-2.0',
'34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0', '22414-2.0'])
# Formatting
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
self._add_outer_folds()
self._impute_missing_ecg_instances()
self._add_physicalactivity_instances()
self._compute_sex()
self._compute_age()
self._encode_ethnicity()
# Concatenate the data from the different instances
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw[['eid', 'outer_fold', 'Age_' + i, 'Sex'] + self.ethnicities_vars +
['Abdominal_images_quality']].dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
df_i.rename(columns={'Age_' + i: 'Age'}, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[self.id_vars + self.demographic_vars + ['Abdominal_images_quality']]
if i != '2':
df_i['Abdominal_images_quality'] = np.nan # not defined for instance 3, not relevant for instances 0, 1
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Save age as a float32 instead of float64
self.data_features['Age'] = np.float32(self.data_features['Age'])
# Shuffle the rows before saving the dataframe
self.data_features = self.data_features.sample(frac=1)
# Generate dataframe for eids pipeline as opposed to instances pipeline
self.data_features_eids = self.data_features[self.data_features.instance == '0']
self.data_features_eids['instance'] = '*'
self.data_features_eids['id'] = [ID.replace('_0', '_*') for ID in self.data_features_eids['id'].values]
def save_data(self):
self.data_features.to_csv(self.path_data + 'data-features_instances.csv', index=False)
self.data_features_eids.to_csv(self.path_data + 'data-features_eids.csv', index=False)
class PreprocessingImagesIDs(Basics):
"""
Splits the different images datasets into folds for the future cross validation
"""
def __init__(self):
Basics.__init__(self)
# Instances 2 and 3 datasets (most medical images, mostly medical images)
self.instances23_eids = None
self.HEART_EIDs = None
self.heart_eids = None
self.FOLDS_23_EIDS = None
def _load_23_eids(self):
data_features = pd.read_csv(self.path_data + 'data-features_instances.csv')
images_eids = data_features['eid'][data_features['instance'].isin([2, 3])]
self.images_eids = list(set(images_eids))
def _load_heart_eids(self):
# IDs already used in Heart videos
HEART_EIDS = {}
heart_eids = []
for i in range(10):
# Important: The i's data fold is used as *validation* fold for outer fold i.
data_i = pd.read_csv(
"/n/groups/patel/JbProst/Heart/Data/FoldsAugmented/data-features_Heart_20208_Augmented_Age_val_" + str(
i) + ".csv")
HEART_EIDS[i] = list(set([int(str(ID)[:7]) for ID in data_i['eid']]))
heart_eids = heart_eids + HEART_EIDS[i]
self.HEART_EIDS = HEART_EIDS
self.heart_eids = heart_eids
def _split_23_eids_folds(self):
self._load_23_eids()
self._load_heart_eids()
# List extra images ids, and split them between the different folds.
extra_eids = [eid for eid in self.images_eids if eid not in self.heart_eids]
random.shuffle(extra_eids)
n_samples = len(extra_eids)
n_samples_by_fold = n_samples / self.n_CV_outer_folds
FOLDS_EXTRAEIDS = {}
FOLDS_EIDS = {}
for outer_fold in self.outer_folds:
FOLDS_EXTRAEIDS[outer_fold] = \
extra_eids[int((int(outer_fold)) * n_samples_by_fold):int((int(outer_fold) + 1) * n_samples_by_fold)]
FOLDS_EIDS[outer_fold] = self.HEART_EIDS[int(outer_fold)] + FOLDS_EXTRAEIDS[outer_fold]
self.FOLDS_23_EIDS = FOLDS_EIDS
def _save_23_eids_folds(self):
for outer_fold in self.outer_folds:
with open(self.path_data + 'instances23_eids_' + outer_fold + '.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(self.FOLDS_23_EIDS[outer_fold])
def generate_eids_splits(self):
print("Generating eids split for organs on instances 2 and 3")
self._split_23_eids_folds()
self._save_23_eids_folds()
class PreprocessingFolds(Metrics):
"""
Splits the data into training, validation and testing sets for all CV folds
"""
def __init__(self, target, organ, regenerate_data):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.list_ids_per_view_transformation = None
# Check if these folds have already been generated
if not regenerate_data:
if len(glob.glob(self.path_data + 'data-features_' + organ + '_*_' + target + '_*.csv')) > 0:
print("Error: The files already exist! Either change regenerate_data to True or delete the previous"
" version.")
sys.exit(1)
self.side_predictors = self.dict_side_predictors[target]
self.variables_to_normalize = self.side_predictors
if target in self.targets_regression:
self.variables_to_normalize.append(target)
self.dict_image_quality_col = {'Liver': 'Abdominal_images_quality'}
self.dict_image_quality_col.update(
dict.fromkeys(['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal', 'PhysicalActivity'],
None))
self.image_quality_col = self.dict_image_quality_col[organ]
self.views = self.dict_organs_to_views[organ]
self.list_ids = None
self.list_ids_per_view = {}
self.data = None
self.EIDS = None
self.EIDS_per_view = {'train': {}, 'val': {}, 'test': {}}
self.data_fold = None
def _get_list_ids(self):
self.list_ids_per_view_transformation = {}
list_ids = []
# if different views are available, take the union of the ids
for view in self.views:
self.list_ids_per_view_transformation[view] = {}
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
list_ids_transformation = []
path = '../images/' + self.organ + '/' + view + '/' + transformation + '/'
# for paired organs, take the unions of the ids available on the right and the left sides
if self.organ + '_' + view in self.left_right_organs_views:
for side in ['right', 'left']:
list_ids_transformation += os.listdir(path + side + '/')
list_ids_transformation = np.unique(list_ids_transformation).tolist()
else:
list_ids_transformation += os.listdir(path)
self.list_ids_per_view_transformation[view][transformation] = \
[im.replace('.jpg', '') for im in list_ids_transformation]
list_ids += self.list_ids_per_view_transformation[view][transformation]
self.list_ids = np.unique(list_ids).tolist()
self.list_ids.sort()
def _filter_and_format_data(self):
"""
Clean the data before it can be split between the rows
"""
cols_data = self.id_vars + self.demographic_vars
if self.image_quality_col is not None:
cols_data.append(self.dict_image_quality_col[self.organ])
data = pd.read_csv(self.path_data + 'data-features_instances.csv', usecols=cols_data)
data.rename(columns={self.dict_image_quality_col[self.organ]: 'Data_quality'}, inplace=True)
for col_name in self.id_vars:
data[col_name] = data[col_name].astype(str)
data.set_index('id', drop=False, inplace=True)
if self.image_quality_col is not None:
            data = data[data['Data_quality'].notna()]  # '!= np.nan' is always True; use notna() to drop missing quality scores
data.drop('Data_quality', axis=1, inplace=True)
# get rid of samples with NAs
data.dropna(inplace=True)
# list the samples' ids for which images are available
data = data.loc[self.list_ids]
self.data = data
def _split_data(self):
# Generate the data for each outer_fold
for i, outer_fold in enumerate(self.outer_folds):
of_val = outer_fold
of_test = str((int(outer_fold) + 1) % len(self.outer_folds))
DATA = {
'train': self.data[~self.data['outer_fold'].isin([of_val, of_test])],
'val': self.data[self.data['outer_fold'] == of_val],
'test': self.data[self.data['outer_fold'] == of_test]
}
# Generate the data for the different views and transformations
for view in self.views:
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
print('Splitting data for view ' + view + ', and transformation ' + transformation)
DF = {}
for fold in self.folds:
idx = DATA[fold]['id'].isin(self.list_ids_per_view_transformation[view][transformation]).values
DF[fold] = DATA[fold].iloc[idx, :]
# compute values for scaling of variables
normalizing_values = {}
for var in self.variables_to_normalize:
var_mean = DF['train'][var].mean()
if len(DF['train'][var].unique()) < 2:
print('Variable ' + var + ' has a single value in fold ' + outer_fold +
'. Using 1 as std for normalization.')
var_std = 1
else:
var_std = DF['train'][var].std()
normalizing_values[var] = {'mean': var_mean, 'std': var_std}
# normalize the variables
for fold in self.folds:
for var in self.variables_to_normalize:
DF[fold][var + '_raw'] = DF[fold][var]
DF[fold][var] = (DF[fold][var] - normalizing_values[var]['mean']) \
/ normalizing_values[var]['std']
# report issue if NAs were detected (most likely comes from a sample whose id did not match)
n_mismatching_samples = DF[fold].isna().sum().max()
if n_mismatching_samples > 0:
print(DF[fold][DF[fold].isna().any(axis=1)])
print('/!\\ WARNING! ' + str(n_mismatching_samples) + ' ' + fold + ' images ids out of ' +
str(len(DF[fold].index)) + ' did not match the dataframe!')
# save the data
DF[fold].to_csv(self.path_data + 'data-features_' + self.organ + '_' + view + '_' +
transformation + '_' + self.target + '_' + fold + '_' + outer_fold + '.csv',
index=False)
print('For outer_fold ' + outer_fold + ', the ' + fold + ' fold has a sample size of ' +
str(len(DF[fold].index)))
def generate_folds(self):
self._get_list_ids()
self._filter_and_format_data()
self._split_data()
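# Illustrative sketch (not part of the pipeline above): a minimal example of the normalization step
# performed in _split_data, where the mean and standard deviation are computed on the training fold
# only and then applied to every fold. The dataframe and column names below are hypothetical.
def _example_normalize_with_train_stats(df_train, df_fold, variables_to_normalize):
    normalized = df_fold.copy()
    for var in variables_to_normalize:
        var_mean = df_train[var].mean()
        # fall back to a std of 1 when the variable is constant in the training fold
        var_std = df_train[var].std() if df_train[var].nunique() > 1 else 1
        normalized[var + '_raw'] = df_fold[var]
        normalized[var] = (df_fold[var] - var_mean) / var_std
    return normalized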
class PreprocessingSurvival(Basics):
"""
Preprocesses the main dataframe for survival purposes.
    Mirrors the PreprocessingMain class, but computes Death and FollowUpTime for the future survival analysis
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
self.survival_vars = ['FollowUpTime', 'Death']
def _preprocessing(self):
usecols = ['eid', '40000-0.0', '34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0']
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=usecols)
dict_UKB_fields_to_names = {'40000-0.0': 'FollowUpDate', '34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3'}
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
# Format survival data
self.data_raw['Death'] = ~self.data_raw['FollowUpDate'].isna()
        self.data_raw.loc[self.data_raw['FollowUpDate'].isna(), 'FollowUpDate'] = '2020-04-27'
self.data_raw['FollowUpDate'] = self.data_raw['FollowUpDate'].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
assert ('FollowUpDate.1' not in self.data_raw.columns)
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw.dropna(subset=['Year_of_birth'], inplace=True)
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpDate'] - self.data_raw[
'Date_attended_center_' + i]
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpTime_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth', 'FollowUpDate'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _concatenate_instances(self):
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw.dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
dict_names = {}
features = ['Age', 'FollowUpTime']
for feature in features:
dict_names[feature + '_' + i] = feature
self.dict_names = dict_names
df_i.rename(columns=dict_names, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[['id', 'eid', 'instance'] + self.survival_vars]
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Add * instance for eids
survival_eids = self.data_features[self.data_features['instance'] == '0']
survival_eids['instance'] = '*'
survival_eids['id'] = survival_eids['eid'] + '_' + survival_eids['instance']
self.data_features = self.data_features.append(survival_eids)
def generate_data(self):
# Formatting
self._preprocessing()
self._add_physicalactivity_instances()
self._compute_age()
self._concatenate_instances()
# save data
self.data_features.to_csv('../data/data_survival.csv', index=False)
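# Illustrative sketch (standalone example, not called by the classes above): how a fractional age and
# a follow-up time in years are derived from dates, mirroring PreprocessingSurvival._compute_age.
# The date strings passed in are hypothetical, formatted as '%Y-%m-%d'.
def _example_age_and_follow_up_time(year_of_birth, month_of_birth, date_attended, follow_up_date):
    from datetime import datetime
    date_of_birth = datetime(year_of_birth, month_of_birth, 15)  # middle of the month, as above
    attended = datetime.strptime(date_attended, '%Y-%m-%d')
    followed = datetime.strptime(follow_up_date, '%Y-%m-%d')
    age = (attended - date_of_birth).days / 365.25
    follow_up_time = (followed - attended).days / 365.25
    return age, follow_up_time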
class MyImageDataGenerator(Basics, Sequence, ImageDataGenerator):
"""
Helper class: custom data generator for images.
It handles several custom features such as:
    - provides batches that contain not only the images, but also the scalar data (e.g. demographics) that corresponds
    to them
    - it performs random shuffling while making sure that no leftover data (the remainder of the modulo batch size)
    goes unused
    - it can handle paired data for paired organs (e.g. left/right eyes)
"""
def __init__(self, target=None, organ=None, view=None, data_features=None, n_samples_per_subepoch=None,
batch_size=None, training_mode=None, side_predictors=None, dir_images=None, images_width=None,
images_height=None, data_augmentation=False, data_augmentation_factor=None, seed=None):
# Parameters
Basics.__init__(self)
self.target = target
if target in self.targets_regression:
self.labels = data_features[target]
else:
self.labels = data_features[target + '_raw']
self.organ = organ
self.view = view
self.training_mode = training_mode
self.data_features = data_features
self.list_ids = data_features.index.values
self.batch_size = batch_size
# for paired organs, take twice fewer ids (two images for each id), and add organ_side as side predictor
if organ + '_' + view in self.left_right_organs_views:
self.data_features['organ_side'] = np.nan
self.n_ids_batch = batch_size // 2
else:
self.n_ids_batch = batch_size
        if self.training_mode and (n_samples_per_subepoch is not None):  # during training, 1 epoch = n_samples_per_subepoch samples
self.steps = math.ceil(n_samples_per_subepoch / batch_size)
else: # during prediction and other tasks, an epoch is defined as all the samples being seen once and only once
self.steps = math.ceil(len(self.list_ids) / self.n_ids_batch)
# learning_rate_patience
if n_samples_per_subepoch is not None:
self.n_subepochs_per_epoch = math.ceil(len(self.data_features.index) / n_samples_per_subepoch)
# initiate the indices and shuffle the ids
self.shuffle = training_mode # Only shuffle if the model is being trained. Otherwise no need.
self.indices = np.arange(len(self.list_ids))
self.idx_end = 0 # Keep track of last indice to permute indices accordingly at the end of epoch.
if self.shuffle:
np.random.shuffle(self.indices)
# Input for side NN and CNN
self.side_predictors = side_predictors
self.dir_images = dir_images
self.images_width = images_width
self.images_height = images_height
# Data augmentation
self.data_augmentation = data_augmentation
self.data_augmentation_factor = data_augmentation_factor
self.seed = seed
# Parameters for data augmentation: (rotation range, width shift range, height shift range, zoom range)
self.augmentation_parameters = \
pd.DataFrame(index=['Brain_MRI', 'Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Heart_MRI',
'Abdomen_Liver', 'Abdomen_Pancreas', 'Musculoskeletal_Spine', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees', 'Musculoskeletal_FullBody', 'PhysicalActivity_FullWeek',
'PhysicalActivity_Walking'],
columns=['rotation', 'width_shift', 'height_shift', 'zoom'])
self.augmentation_parameters.loc['Brain_MRI', :] = [10, 0.05, 0.1, 0.0]
self.augmentation_parameters.loc['Eyes_Fundus', :] = [20, 0.02, 0.02, 0]
self.augmentation_parameters.loc['Eyes_OCT', :] = [30, 0.1, 0.2, 0]
self.augmentation_parameters.loc[['Arterial_Carotids'], :] = [0, 0.2, 0.0, 0.0]
self.augmentation_parameters.loc[['Heart_MRI', 'Abdomen_Liver', 'Abdomen_Pancreas',
'Musculoskeletal_Spine'], :] = [10, 0.1, 0.1, 0.0]
self.augmentation_parameters.loc[['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], :] = [10, 0.1, 0.1, 0.1]
self.augmentation_parameters.loc[['Musculoskeletal_FullBody'], :] = [10, 0.05, 0.02, 0.0]
self.augmentation_parameters.loc[['PhysicalActivity_FullWeek'], :] = [0, 0, 0, 0.0]
organ_view = organ + '_' + view
ImageDataGenerator.__init__(self, rescale=1. / 255.,
rotation_range=self.augmentation_parameters.loc[organ_view, 'rotation'],
width_shift_range=self.augmentation_parameters.loc[organ_view, 'width_shift'],
height_shift_range=self.augmentation_parameters.loc[organ_view, 'height_shift'],
zoom_range=self.augmentation_parameters.loc[organ_view, 'zoom'])
def __len__(self):
return self.steps
def on_epoch_end(self):
_ = gc.collect()
self.indices = np.concatenate([self.indices[self.idx_end:], self.indices[:self.idx_end]])
def _generate_image(self, path_image):
img = load_img(path_image, target_size=(self.images_width, self.images_height), color_mode='rgb')
Xi = img_to_array(img)
if hasattr(img, 'close'):
img.close()
if self.data_augmentation:
params = self.get_random_transform(Xi.shape)
Xi = self.apply_transform(Xi, params)
Xi = self.standardize(Xi)
return Xi
def _data_generation(self, list_ids_batch):
# initialize empty matrices
n_samples_batch = min(len(list_ids_batch), self.batch_size)
X = np.empty((n_samples_batch, self.images_width, self.images_height, 3)) * np.nan
x = np.empty((n_samples_batch, len(self.side_predictors))) * np.nan
y = np.empty((n_samples_batch, 1)) * np.nan
# fill the matrices sample by sample
for i, ID in enumerate(list_ids_batch):
y[i] = self.labels[ID]
x[i] = self.data_features.loc[ID, self.side_predictors]
if self.organ + '_' + self.view in self.left_right_organs_views:
if i % 2 == 0:
path = self.dir_images + 'right/'
x[i][-1] = 0
else:
path = self.dir_images + 'left/'
x[i][-1] = 1
if not os.path.exists(path + ID + '.jpg'):
path = path.replace('/right/', '/left/') if i % 2 == 0 else path.replace('/left/', '/right/')
x[i][-1] = 1 - x[i][-1]
else:
path = self.dir_images
X[i, :, :, :] = self._generate_image(path_image=path + ID + '.jpg')
return [X, x], y
def __getitem__(self, index):
# Select the indices
idx_start = (index * self.n_ids_batch) % len(self.list_ids)
idx_end = (((index + 1) * self.n_ids_batch) - 1) % len(self.list_ids) + 1
if idx_start > idx_end:
# If this happens outside of training, that is a mistake
if not self.training_mode:
print('\nERROR: Outside of training, every sample should only be predicted once!')
sys.exit(1)
# Select part of the indices from the end of the epoch
indices = self.indices[idx_start:]
# Generate a new set of indices
# print('\nThe end of the data was reached within this batch, looping.')
if self.shuffle:
np.random.shuffle(self.indices)
# Complete the batch with samples from the new indices
indices = np.concatenate([indices, self.indices[:idx_end]])
else:
indices = self.indices[idx_start: idx_end]
            if (idx_end == len(self.list_ids)) and self.shuffle:
# print('\nThe end of the data was reached. Shuffling for the next epoch.')
np.random.shuffle(self.indices)
# Keep track of last indice for end of subepoch
self.idx_end = idx_end
# Select the corresponding ids
list_ids_batch = [self.list_ids[i] for i in indices]
# For paired organs, two images (left, right eyes) are selected for each id.
if self.organ + '_' + self.view in self.left_right_organs_views:
list_ids_batch = [ID for ID in list_ids_batch for _ in ('right', 'left')]
return self._data_generation(list_ids_batch)
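# Illustrative sketch: the wrap-around indexing used by MyImageDataGenerator.__getitem__ when a
# training batch straddles the end of an epoch. Standalone toy example; `indices` is assumed to be a
# 1D numpy array of sample positions.
def _example_wraparound_batch_indices(indices, idx_start, idx_end):
    import numpy as np
    if idx_start > idx_end:
        head = indices[idx_start:].copy()  # copy so the reshuffle below does not alter it
        np.random.shuffle(indices)         # reshuffle in place for the next pass over the data
        return np.concatenate([head, indices[:idx_end]])
    return indices[idx_start:idx_end]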
class MyCSVLogger(Callback):
"""
    Custom CSV Logger callback class for Keras training: appends to the existing file if one can be found. Allows
    training to be tracked across several jobs.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.csv_file = None
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
Callback.__init__(self)
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename, mode + self.file_flags, **self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch', 'learning_rate'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
        row_dict = collections.OrderedDict({'epoch': epoch,
                                            'learning_rate': tf.keras.backend.eval(self.model.optimizer.lr)})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
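# Illustrative sketch: how the CSV written by MyCSVLogger can be read back to resume training with
# the learning rate of the epoch that reached the highest validation metric (the same idea is used in
# Training._compile_model further below). The path and metric name passed in are hypothetical.
def _example_recover_learning_rate(path_logger, main_metric_name, default_learning_rate):
    import os
    import pandas as pd
    if not os.path.exists(path_logger):
        return default_learning_rate
    logger = pd.read_csv(path_logger)
    best_log = logger[logger['val_' + main_metric_name] == logger['val_' + main_metric_name].max()]
    return best_log['learning_rate'].values[0]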
class MyModelCheckpoint(ModelCheckpoint):
"""
Custom checkpoint callback class for Keras training. Handles a baseline performance.
"""
def __init__(self, filepath, monitor='val_loss', baseline=-np.Inf, verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', save_freq='epoch'):
# Parameters
ModelCheckpoint.__init__(self, filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only,
save_weights_only=save_weights_only, mode=mode, save_freq=save_freq)
if mode == 'min':
self.monitor_op = np.less
self.best = baseline
elif mode == 'max':
self.monitor_op = np.greater
self.best = baseline
else:
print('Error. mode for metric must be either min or max')
sys.exit(1)
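# Illustrative sketch: how the baseline passed to MyModelCheckpoint interacts with the monitor mode.
# Weights are only saved once the monitored metric improves on the baseline, and then on each new
# best value. The toy values passed in are hypothetical.
def _example_should_save_checkpoint(current_value, best_so_far, mode):
    import numpy as np
    monitor_op = np.less if mode == 'min' else np.greater
    return bool(monitor_op(current_value, best_so_far))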
class DeepLearning(Metrics):
"""
Core helper class to train models. Used to:
- build the data generators
- generate the CNN architectures
- load the weights
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, debug_mode=False):
# Initialization
Metrics.__init__(self)
tf.random.set_seed(self.seed)
# Model's version
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = int(n_fc_layers)
self.n_fc_nodes = int(n_fc_nodes)
self.optimizer = optimizer
self.learning_rate = float(learning_rate)
self.weight_decay = float(weight_decay)
self.dropout_rate = float(dropout_rate)
self.data_augmentation_factor = float(data_augmentation_factor)
self.outer_fold = None
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# NNet's architecture and weights
self.side_predictors = self.dict_side_predictors[target]
if self.organ + '_' + self.view in self.left_right_organs_views:
self.side_predictors.append('organ_side')
self.dict_final_activations = {'regression': 'linear', 'binary': 'sigmoid', 'multiclass': 'softmax',
'saliency': 'linear'}
self.path_load_weights = None
self.keras_weights = None
# Generators
self.debug_mode = debug_mode
self.debug_fraction = 0.005
self.DATA_FEATURES = {}
self.mode = None
self.n_cpus = len(os.sched_getaffinity(0))
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
# define dictionary to fit the architecture's input size to the images sizes (take min (height, width))
self.dict_organ_view_transformation_to_image_size = {
'Eyes_Fundus_Raw': (316, 316), # initial size (1388, 1388)
'Eyes_OCT_Raw': (312, 320), # initial size (500, 512)
'Musculoskeletal_Spine_Sagittal': (466, 211), # initial size (1513, 684)
'Musculoskeletal_Spine_Coronal': (315, 313), # initial size (724, 720)
'Musculoskeletal_Hips_MRI': (329, 303), # initial size (626, 680)
'Musculoskeletal_Knees_MRI': (347, 286) # initial size (851, 700)
}
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference'],
(316, 316))) # initial size (88, 88)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis', 'Arterial_Carotids_CIMT120',
'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis'],
(337, 291))) # initial size (505, 436)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast'],
(316, 316))) # initial size (200, 200)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast'], (288, 364))) # initial size (288, 364)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast'], (288, 350))) # initial size (288, 350)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Musculoskeletal_FullBody_Figure', 'Musculoskeletal_FullBody_Skeleton',
'Musculoskeletal_FullBody_Flesh', 'Musculoskeletal_FullBody_Mixed'],
(541, 181))) # initial size (811, 272)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min',
'PhysicalActivity_FullWeek_RecurrencePlots1min'],
(316, 316))) # initial size (316, 316)
self.dict_architecture_to_image_size = {'MobileNet': (224, 224), 'MobileNetV2': (224, 224),
'NASNetMobile': (224, 224), 'NASNetLarge': (331, 331)}
if self.architecture in ['MobileNet', 'MobileNetV2', 'NASNetMobile', 'NASNetLarge']:
self.image_width, self.image_height = self.dict_architecture_to_image_size[architecture]
else:
self.image_width, self.image_height = \
self.dict_organ_view_transformation_to_image_size[organ + '_' + view + '_' + transformation]
# define dictionary of batch sizes to fit as many samples as the model's architecture allows
self.dict_batch_sizes = {
# Default, applies to all images with resized input ~100,000 pixels
'Default': {'VGG16': 32, 'VGG19': 32, 'DenseNet121': 16, 'DenseNet169': 16, 'DenseNet201': 16,
'Xception': 32, 'InceptionV3': 32, 'InceptionResNetV2': 8, 'ResNet50': 32, 'ResNet101': 16,
'ResNet152': 16, 'ResNet50V2': 32, 'ResNet101V2': 16, 'ResNet152V2': 16, 'ResNeXt50': 4,
'ResNeXt101': 8, 'EfficientNetB7': 4,
'MobileNet': 128, 'MobileNetV2': 64, 'NASNetMobile': 64, 'NASNetLarge': 4}}
# Define batch size
if organ + '_' + view in self.dict_batch_sizes.keys():
            self.batch_size = self.dict_batch_sizes[organ + '_' + view][architecture]
else:
self.batch_size = self.dict_batch_sizes['Default'][architecture]
# double the batch size for the teslaM40 cores that have bigger memory
        if len(GPUtil.getGPUs()) > 0:  # make sure GPUs are available (not always the case, e.g. when debugging)
if GPUtil.getGPUs()[0].memoryTotal > 20000:
self.batch_size *= 2
# Define number of ids per batch (twice fewer for paired organs, because left and right samples)
self.n_ids_batch = self.batch_size
if organ + '_' + view in self.left_right_organs_views:
self.n_ids_batch //= 2
# Define number of samples per subepoch
if debug_mode:
self.n_samples_per_subepoch = self.batch_size * 4
else:
self.n_samples_per_subepoch = 32768
if organ + '_' + view in self.left_right_organs_views:
self.n_samples_per_subepoch //= 2
# dict to decide which field is used to generate the ids when several targets share the same ids
self.dict_target_to_ids = dict.fromkeys(['Age', 'Sex'], 'Age')
# Note: R-Squared and F1-Score are not available, because their batch based values are misleading.
# For some reason, Sensitivity and Specificity are not available either. Might implement later.
self.dict_losses_K = {'MSE': MeanSquaredError(name='MSE'),
'Binary-Crossentropy': BinaryCrossentropy(name='Binary-Crossentropy')}
self.dict_metrics_K = {'R-Squared': RSquare(name='R-Squared', y_shape=(1,)),
'RMSE': RootMeanSquaredError(name='RMSE'),
'F1-Score': F1Score(name='F1-Score', num_classes=1, dtype=tf.float32),
'ROC-AUC': AUC(curve='ROC', name='ROC-AUC'),
'PR-AUC': AUC(curve='PR', name='PR-AUC'),
'Binary-Accuracy': BinaryAccuracy(name='Binary-Accuracy'),
'Precision': Precision(name='Precision'),
'Recall': Recall(name='Recall'),
'True-Positives': TruePositives(name='True-Positives'),
'False-Positives': FalsePositives(name='False-Positives'),
'False-Negatives': FalseNegatives(name='False-Negatives'),
'True-Negatives': TrueNegatives(name='True-Negatives')}
# Metrics
self.prediction_type = self.dict_prediction_types[target]
self.loss_name = self.dict_losses_names[self.prediction_type]
self.loss_function = self.dict_losses_K[self.loss_name]
self.main_metric_name = self.dict_main_metrics_names_K[target]
self.main_metric_mode = self.main_metrics_modes[self.main_metric_name]
self.main_metric = self.dict_metrics_K[self.main_metric_name]
self.metrics_names = [self.main_metric_name]
self.metrics = [self.dict_metrics_K[metric_name] for metric_name in self.metrics_names]
# Optimizers
self.optimizers = {'Adam': Adam, 'RMSprop': RMSprop, 'Adadelta': Adadelta}
# Model
self.model = None
@staticmethod
def _append_ext(fn):
return fn + ".jpg"
def _load_data_features(self):
for fold in self.folds:
self.DATA_FEATURES[fold] = pd.read_csv(
self.path_data + 'data-features_' + self.organ + '_' + self.view + '_' + self.transformation + '_' +
self.dict_target_to_ids[self.target] + '_' + fold + '_' + self.outer_fold + '.csv')
for col_name in self.id_vars:
self.DATA_FEATURES[fold][col_name] = self.DATA_FEATURES[fold][col_name].astype(str)
self.DATA_FEATURES[fold].set_index('id', drop=False, inplace=True)
def _take_subset_to_debug(self):
for fold in self.folds:
# use +1 or +2 to test the leftovers pipeline
leftovers_extra = {'train': 0, 'val': 1, 'test': 2}
n_batches = 2
n_limit_fold = leftovers_extra[fold] + self.batch_size * n_batches
self.DATA_FEATURES[fold] = self.DATA_FEATURES[fold].iloc[:n_limit_fold, :]
def _generate_generators(self, DATA_FEATURES):
GENERATORS = {}
for fold in self.folds:
# do not generate a generator if there are no samples (can happen for leftovers generators)
if fold not in DATA_FEATURES.keys():
continue
# parameters
training_mode = True if self.mode == 'model_training' else False
if (fold == 'train') & (self.mode == 'model_training') & \
(self.organ + '_' + self.view not in self.organsviews_not_to_augment):
data_augmentation = True
else:
data_augmentation = False
# define batch size for testing: data is split between a part that fits in batches, and leftovers
if self.mode == 'model_testing':
if self.organ + '_' + self.view in self.left_right_organs_views:
n_samples = len(DATA_FEATURES[fold].index) * 2
else:
n_samples = len(DATA_FEATURES[fold].index)
batch_size_fold = min(self.batch_size, n_samples)
else:
batch_size_fold = self.batch_size
if (fold == 'train') & (self.mode == 'model_training'):
n_samples_per_subepoch = self.n_samples_per_subepoch
else:
n_samples_per_subepoch = None
# generator
GENERATORS[fold] = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=DATA_FEATURES[fold], n_samples_per_subepoch=n_samples_per_subepoch,
batch_size=batch_size_fold, training_mode=training_mode,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=data_augmentation,
data_augmentation_factor=self.data_augmentation_factor, seed=self.seed)
return GENERATORS
def _generate_class_weights(self):
if self.dict_prediction_types[self.target] == 'binary':
self.class_weights = {}
counts = self.DATA_FEATURES['train'][self.target + '_raw'].value_counts()
n_total = counts.sum()
# weighting the samples for each class inversely proportional to their prevalence, with order of magnitude 1
for i in counts.index.values:
self.class_weights[i] = n_total / (counts.loc[i] * len(counts.index))
def _generate_cnn(self):
# define the arguments
# take special initial weights for EfficientNetB7 (better)
if (self.architecture == 'EfficientNetB7') & (self.keras_weights == 'imagenet'):
w = 'noisy-student'
else:
w = self.keras_weights
kwargs = {"include_top": False, "weights": w, "input_shape": (self.image_width, self.image_height, 3)}
if self.architecture in ['ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'ResNeXt50', 'ResNeXt101']:
import tensorflow.keras
kwargs.update(
{"backend": tensorflow.keras.backend, "layers": tensorflow.keras.layers,
"models": tensorflow.keras.models, "utils": tensorflow.keras.utils})
# load the architecture builder
if self.architecture == 'VGG16':
from tensorflow.keras.applications.vgg16 import VGG16 as ModelBuilder
elif self.architecture == 'VGG19':
from tensorflow.keras.applications.vgg19 import VGG19 as ModelBuilder
elif self.architecture == 'DenseNet121':
from tensorflow.keras.applications.densenet import DenseNet121 as ModelBuilder
elif self.architecture == 'DenseNet169':
from tensorflow.keras.applications.densenet import DenseNet169 as ModelBuilder
elif self.architecture == 'DenseNet201':
from tensorflow.keras.applications.densenet import DenseNet201 as ModelBuilder
elif self.architecture == 'Xception':
from tensorflow.keras.applications.xception import Xception as ModelBuilder
elif self.architecture == 'InceptionV3':
from tensorflow.keras.applications.inception_v3 import InceptionV3 as ModelBuilder
elif self.architecture == 'InceptionResNetV2':
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 as ModelBuilder
elif self.architecture == 'ResNet50':
from keras_applications.resnet import ResNet50 as ModelBuilder
elif self.architecture == 'ResNet101':
from keras_applications.resnet import ResNet101 as ModelBuilder
elif self.architecture == 'ResNet152':
from keras_applications.resnet import ResNet152 as ModelBuilder
elif self.architecture == 'ResNet50V2':
from keras_applications.resnet_v2 import ResNet50V2 as ModelBuilder
elif self.architecture == 'ResNet101V2':
from keras_applications.resnet_v2 import ResNet101V2 as ModelBuilder
elif self.architecture == 'ResNet152V2':
from keras_applications.resnet_v2 import ResNet152V2 as ModelBuilder
elif self.architecture == 'ResNeXt50':
from keras_applications.resnext import ResNeXt50 as ModelBuilder
elif self.architecture == 'ResNeXt101':
from keras_applications.resnext import ResNeXt101 as ModelBuilder
elif self.architecture == 'EfficientNetB7':
from efficientnet.tfkeras import EfficientNetB7 as ModelBuilder
        # The following models have a fixed input size requirement
elif self.architecture == 'NASNetMobile':
from tensorflow.keras.applications.nasnet import NASNetMobile as ModelBuilder
elif self.architecture == 'NASNetLarge':
from tensorflow.keras.applications.nasnet import NASNetLarge as ModelBuilder
elif self.architecture == 'MobileNet':
from tensorflow.keras.applications.mobilenet import MobileNet as ModelBuilder
elif self.architecture == 'MobileNetV2':
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 as ModelBuilder
else:
print('Architecture does not exist.')
sys.exit(1)
# build the model's base
cnn = ModelBuilder(**kwargs)
x = cnn.output
# complete the model's base
if self.architecture in ['VGG16', 'VGG19']:
x = Flatten()(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
else:
x = GlobalAveragePooling2D()(x)
if self.architecture == 'EfficientNetB7':
x = Dropout(self.dropout_rate)(x)
cnn_output = x
return cnn.input, cnn_output
def _generate_side_nn(self):
side_nn = Sequential()
side_nn.add(Dense(16, input_dim=len(self.side_predictors), activation="relu",
kernel_regularizer=regularizers.l2(self.weight_decay)))
return side_nn.input, side_nn.output
def _complete_architecture(self, cnn_input, cnn_output, side_nn_input, side_nn_output):
x = concatenate([cnn_output, side_nn_output])
x = Dropout(self.dropout_rate)(x)
for n in [int(self.n_fc_nodes * (2 ** (2 * (self.n_fc_layers - 1 - i)))) for i in range(self.n_fc_layers)]:
x = Dense(n, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
# scale the dropout proportionally to the number of nodes in a layer. No dropout for the last layers
if n > 16:
x = Dropout(self.dropout_rate * n / 1024)(x)
predictions = Dense(1, activation=self.dict_final_activations[self.prediction_type],
kernel_regularizer=regularizers.l2(self.weight_decay))(x)
self.model = Model(inputs=[cnn_input, side_nn_input], outputs=predictions)
def _generate_architecture(self):
cnn_input, cnn_output = self._generate_cnn()
side_nn_input, side_nn_output = self._generate_side_nn()
self._complete_architecture(cnn_input=cnn_input, cnn_output=cnn_output, side_nn_input=side_nn_input,
side_nn_output=side_nn_output)
def _load_model_weights(self):
try:
self.model.load_weights(self.path_load_weights)
except (FileNotFoundError, TypeError):
# load backup weights if the main weights are corrupted
try:
self.model.load_weights(self.path_load_weights.replace('model-weights', 'backup-model-weights'))
except FileNotFoundError:
print('Error. No file was found. imagenet weights should have been used. Bug somewhere.')
sys.exit(1)
@staticmethod
def clean_exit():
# exit
print('\nDone.\n')
print('Killing JOB PID with kill...')
os.system('touch ../eo/' + os.environ['SLURM_JOBID'])
os.system('kill ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB PID with kill -9...')
os.system('kill -9 ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB ID')
os.system('scancel ' + os.environ['SLURM_JOBID'])
time.sleep(60)
print('Everything failed to kill the job. Hanging there until hitting walltime...')
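# Illustrative sketch (assumption: a minimal toy model, not the pipeline's actual builder): the
# two-input pattern used by DeepLearning._complete_architecture above, where the image features
# produced by the CNN are concatenated with a small dense encoding of the scalar side predictors.
def _example_two_input_model(n_image_features=1024, n_side_predictors=15):
    from tensorflow.keras.layers import Input, Dense, Dropout, concatenate
    from tensorflow.keras.models import Model
    image_features = Input(shape=(n_image_features,))
    side_predictors = Input(shape=(n_side_predictors,))
    side_encoding = Dense(16, activation='relu')(side_predictors)
    x = concatenate([image_features, side_encoding])
    x = Dropout(0.1)(x)
    x = Dense(64, activation='relu')(x)
    prediction = Dense(1, activation='linear')(x)
    return Model(inputs=[image_features, side_predictors], outputs=prediction)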
class Training(DeepLearning):
"""
Class to train CNN models:
- Generates the architecture
- Loads the best last weights so that a model can be trained over several jobs
- Generates the callbacks
- Compiles the model
- Trains the model
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False, transfer_learning=None,
continue_training=True, display_full_metrics=True):
# parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.version = self.version + '_' + str(outer_fold)
# NNet's architecture's weights
self.continue_training = continue_training
self.transfer_learning = transfer_learning
self.list_parameters_to_match = ['organ', 'transformation', 'view']
# dict to decide in which order targets should be used when trying to transfer weight from a similar model
self.dict_alternative_targets_for_transfer_learning = {'Age': ['Age', 'Sex'], 'Sex': ['Sex', 'Age']}
# Generators
self.folds = ['train', 'val']
self.mode = 'model_training'
self.class_weights = None
self.GENERATORS = None
# Metrics
self.baseline_performance = None
if display_full_metrics:
self.metrics_names = self.dict_metrics_names_K[self.prediction_type]
# Model
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '.h5'
if debug_mode:
self.path_save_weights = self.path_data + 'model-weights-debug.h5'
else:
self.path_save_weights = self.path_data + 'model-weights_' + self.version + '.h5'
self.n_epochs_max = 100000
self.callbacks = None
# Load and preprocess the data, build the generators
def data_preprocessing(self):
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._generate_class_weights()
self.GENERATORS = self._generate_generators(self.DATA_FEATURES)
# Determine which weights to load, if any.
def _weights_for_transfer_learning(self):
print('Looking for models to transfer weights from...')
# define parameters
parameters = self._version_to_parameters(self.version)
# continue training if possible
if self.continue_training and os.path.exists(self.path_load_weights):
print('Loading the weights from the model\'s previous training iteration.')
return
        # Initialize the weights using the weights from other successful hyperparameter combinations
        if self.transfer_learning == 'hyperparameters':
            # Check if the same model with other hyperparameters has already been trained. Pick the best for transfer.
params = self.version.split('_')
params_tl_idx = \
[i for i in range(len(names_model_parameters))
if any(names_model_parameters[i] == p for p in
['optimizer', 'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor'])]
for idx in params_tl_idx:
params[idx] = '*'
versions = '../eo/MI02_' + '_'.join(params) + '.out'
files = glob.glob(versions)
if self.main_metric_mode == 'min':
best_perf = np.Inf
else:
best_perf = -np.Inf
for file in files:
hand = open(file, 'r')
# find best last performance
final_improvement_line = None
baseline_performance_line = None
for line in hand:
line = line.rstrip()
if re.search('Baseline validation ' + self.main_metric_name + ' = ', line):
baseline_performance_line = line
if re.search('val_' + self.main_metric_name + ' improved from', line):
final_improvement_line = line
hand.close()
if final_improvement_line is not None:
perf = float(final_improvement_line.split(' ')[7].replace(',', ''))
elif baseline_performance_line is not None:
perf = float(baseline_performance_line.split(' ')[-1])
else:
continue
# Keep track of the file with the best performance
if self.main_metric_mode == 'min':
update = perf < best_perf
else:
update = perf > best_perf
if update:
best_perf = perf
self.path_load_weights = \
file.replace('../eo/', self.path_data).replace('MI02', 'model-weights').replace('.out', '.h5')
if best_perf not in [-np.Inf, np.Inf]:
print('Transfering the weights from: ' + self.path_load_weights + ', with ' + self.main_metric_name +
' = ' + str(best_perf))
return
# Initialize the weights based on models trained on different datasets, ranked by similarity
if self.transfer_learning == 'datasets':
while True:
# print('Matching models for the following criterias:');
# print(['architecture', 'target'] + list_parameters_to_match)
# start by looking for models trained on the same target, then move to other targets
for target_to_load in self.dict_alternative_targets_for_transfer_learning[parameters['target']]:
# print('Target used: ' + target_to_load)
parameters_to_match = parameters.copy()
parameters_to_match['target'] = target_to_load
# load the ranked performances table to select the best performing model among the similar
# models available
path_performances_to_load = self.path_data + 'PERFORMANCES_ranked_' + \
parameters_to_match['target'] + '_' + 'val' + '.csv'
try:
Performances = pd.read_csv(path_performances_to_load)
Performances['organ'] = Performances['organ'].astype(str)
except FileNotFoundError:
# print("Could not load the file: " + path_performances_to_load)
break
# iteratively get rid of models that are not similar enough, based on the list
for parameter in ['architecture', 'target'] + self.list_parameters_to_match:
Performances = Performances[Performances[parameter] == parameters_to_match[parameter]]
# if at least one model is similar enough, load weights from the best of them
if len(Performances.index) != 0:
                        self.path_load_weights = \
                            self.path_data + 'model-weights_' + Performances['version'].values[0] + '.h5'
self.keras_weights = None
print('transfering the weights from: ' + self.path_load_weights)
return
# if no similar model was found, try again after getting rid of the last selection criteria
if len(self.list_parameters_to_match) == 0:
print('No model found for transfer learning.')
break
self.list_parameters_to_match.pop()
# Otherwise use imagenet weights to initialize
print('Using imagenet weights.')
        # no weights file to load; the imagenet weights will be passed to the architecture builder instead
        self.path_load_weights = None
self.keras_weights = 'imagenet'
def _compile_model(self):
# if learning rate was reduced with success according to logger, start with this reduced learning rate
if self.path_load_weights is not None:
path_logger = self.path_load_weights.replace('model-weights', 'logger').replace('.h5', '.csv')
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
if os.path.exists(path_logger):
try:
logger = pd.read_csv(path_logger)
best_log = \
logger[logger['val_' + self.main_metric_name] == logger['val_' + self.main_metric_name].max()]
lr = best_log['learning_rate'].values[0]
except pd.errors.EmptyDataError:
os.remove(path_logger)
lr = self.learning_rate
else:
lr = self.learning_rate
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=lr, clipnorm=1.0), loss=self.loss_function,
metrics=self.metrics)
def _compute_baseline_performance(self):
# calculate initial val_loss value
if self.continue_training:
idx_metric_name = ([self.loss_name] + self.metrics_names).index(self.main_metric_name)
baseline_perfs = self.model.evaluate(self.GENERATORS['val'], steps=self.GENERATORS['val'].steps)
self.baseline_performance = baseline_perfs[idx_metric_name]
elif self.main_metric_mode == 'min':
self.baseline_performance = np.Inf
else:
self.baseline_performance = -np.Inf
print('Baseline validation ' + self.main_metric_name + ' = ' + str(self.baseline_performance))
def _define_callbacks(self):
if self.debug_mode:
path_logger = self.path_data + 'logger-debug.csv'
append = False
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
append = self.continue_training
csv_logger = MyCSVLogger(path_logger, separator=',', append=append)
model_checkpoint_backup = MyModelCheckpoint(self.path_save_weights.replace('model-weights',
'backup-model-weights'),
monitor='val_' + self.main_metric.name,
baseline=self.baseline_performance, verbose=1, save_best_only=True,
save_weights_only=True, mode=self.main_metric_mode,
save_freq='epoch')
model_checkpoint = MyModelCheckpoint(self.path_save_weights,
monitor='val_' + self.main_metric.name, baseline=self.baseline_performance,
verbose=1, save_best_only=True, save_weights_only=True,
mode=self.main_metric_mode, save_freq='epoch')
patience_reduce_lr = min(7, 3 * self.GENERATORS['train'].n_subepochs_per_epoch)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=patience_reduce_lr, verbose=1,
mode='min', min_delta=0, cooldown=0, min_lr=0)
early_stopping = EarlyStopping(monitor='val_' + self.main_metric.name, min_delta=0, patience=15, verbose=0,
mode=self.main_metric_mode, baseline=self.baseline_performance,
restore_best_weights=True)
self.callbacks = [csv_logger, model_checkpoint_backup, model_checkpoint, early_stopping, reduce_lr_on_plateau]
def build_model(self):
self._weights_for_transfer_learning()
self._generate_architecture()
# Load weights if possible
try:
load_weights = True if os.path.exists(self.path_load_weights) else False
except TypeError:
load_weights = False
if load_weights:
self._load_model_weights()
else:
# save transferred weights as default, in case no better weights are found
self.model.save_weights(self.path_save_weights.replace('model-weights', 'backup-model-weights'))
self.model.save_weights(self.path_save_weights)
self._compile_model()
self._compute_baseline_performance()
self._define_callbacks()
def train_model(self):
# garbage collector
_ = gc.collect()
# use more verbose when debugging
verbose = 1 if self.debug_mode else 2
# train the model
self.model.fit(self.GENERATORS['train'], steps_per_epoch=self.GENERATORS['train'].steps,
validation_data=self.GENERATORS['val'], validation_steps=self.GENERATORS['val'].steps,
shuffle=False, use_multiprocessing=False, workers=self.n_cpus, epochs=self.n_epochs_max,
class_weight=self.class_weights, callbacks=self.callbacks, verbose=verbose)
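# Illustrative sketch: the inverse-prevalence class weighting computed in
# DeepLearning._generate_class_weights above, shown on a standalone pandas Series of binary labels.
def _example_class_weights(labels):
    counts = labels.value_counts()
    n_total = counts.sum()
    # each class is weighted inversely to its prevalence, keeping the weights around an order of magnitude of 1
    return {label: n_total / (counts.loc[label] * len(counts.index)) for label in counts.index}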
class PredictionsGenerate(DeepLearning):
"""
Generates the predictions for each model.
Unscales the predictions.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False):
# Initialize parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.mode = 'model_testing'
# Define dictionaries attributes for data, generators and predictions
self.DATA_FEATURES_BATCH = {}
self.DATA_FEATURES_LEFTOVERS = {}
self.GENERATORS_BATCH = None
self.GENERATORS_LEFTOVERS = None
self.PREDICTIONS = {}
def _split_batch_leftovers(self):
# split the samples into two groups: what can fit into the batch size, and the leftovers.
for fold in self.folds:
n_leftovers = len(self.DATA_FEATURES[fold].index) % self.n_ids_batch
if n_leftovers > 0:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold].iloc[:-n_leftovers]
self.DATA_FEATURES_LEFTOVERS[fold] = self.DATA_FEATURES[fold].tail(n_leftovers)
else:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold] # special case for syntax if no leftovers
if fold in self.DATA_FEATURES_LEFTOVERS.keys():
del self.DATA_FEATURES_LEFTOVERS[fold]
def _generate_outerfold_predictions(self):
# prepare unscaling
if self.target in self.targets_regression:
mean_train = self.DATA_FEATURES['train'][self.target + '_raw'].mean()
std_train = self.DATA_FEATURES['train'][self.target + '_raw'].std()
else:
mean_train, std_train = None, None
# Generate predictions
for fold in self.folds:
print('Predicting samples from fold ' + fold + '.')
print(str(len(self.DATA_FEATURES[fold].index)) + ' samples to predict.')
print('Predicting batches: ' + str(len(self.DATA_FEATURES_BATCH[fold].index)) + ' samples.')
pred_batch = self.model.predict(self.GENERATORS_BATCH[fold], steps=self.GENERATORS_BATCH[fold].steps,
verbose=1)
if fold in self.GENERATORS_LEFTOVERS.keys():
print('Predicting leftovers: ' + str(len(self.DATA_FEATURES_LEFTOVERS[fold].index)) + ' samples.')
pred_leftovers = self.model.predict(self.GENERATORS_LEFTOVERS[fold],
steps=self.GENERATORS_LEFTOVERS[fold].steps, verbose=1)
pred_full = np.concatenate((pred_batch, pred_leftovers)).squeeze()
else:
pred_full = pred_batch.squeeze()
print('Predicted a total of ' + str(len(pred_full)) + ' samples.')
# take the average between left and right predictions for paired organs
if self.organ + '_' + self.view in self.left_right_organs_views:
pred_full = np.mean(pred_full.reshape(-1, 2), axis=1)
# unscale predictions
if self.target in self.targets_regression:
pred_full = pred_full * std_train + mean_train
# format the dataframe
self.DATA_FEATURES[fold]['pred'] = pred_full
self.PREDICTIONS[fold] = self.DATA_FEATURES[fold]
self.PREDICTIONS[fold]['id'] = [ID.replace('.jpg', '') for ID in self.PREDICTIONS[fold]['id']]
def _generate_predictions(self):
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '_' + self.outer_fold + '.h5'
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._load_model_weights()
self._split_batch_leftovers()
# generate the generators
self.GENERATORS_BATCH = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_BATCH)
        if len(self.DATA_FEATURES_LEFTOVERS) > 0:
self.GENERATORS_LEFTOVERS = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_LEFTOVERS)
self._generate_outerfold_predictions()
def _format_predictions(self):
for fold in self.folds:
perf_fun = self.dict_metrics_sklearn[self.dict_main_metrics_names[self.target]]
perf = perf_fun(self.PREDICTIONS[fold][self.target + '_raw'], self.PREDICTIONS[fold]['pred'])
print('The ' + fold + ' performance is: ' + str(perf))
# format the predictions
self.PREDICTIONS[fold].index.name = 'column_names'
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][['id', 'outer_fold', 'pred']]
def generate_predictions(self):
self._generate_architecture()
self._generate_predictions()
self._format_predictions()
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold + '_'
+ self.outer_fold + '.csv', index=False)
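# Illustrative sketch: unscaling regression predictions with the training fold's statistics and
# averaging the paired left/right predictions, as done in PredictionsGenerate above. Standalone
# example on a plain numpy array.
def _example_unscale_predictions(pred, mean_train, std_train, paired_organ=False):
    import numpy as np
    pred = np.asarray(pred).squeeze()
    if paired_organ:
        # consecutive entries correspond to the right and left images of the same sample
        pred = np.mean(pred.reshape(-1, 2), axis=1)
    return pred * std_train + mean_train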
class PredictionsConcatenate(Basics):
"""
Concatenates the predictions coming from the different cross validation folds.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None):
# Initialize parameters
Basics.__init__(self)
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# Define dictionaries attributes for data, generators and predictions
self.PREDICTIONS = {}
def concatenate_predictions(self):
for fold in self.folds:
for outer_fold in self.outer_folds:
Predictions_fold = pd.read_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'_' + outer_fold + '.csv')
if fold in self.PREDICTIONS.keys():
self.PREDICTIONS[fold] = pd.concat([self.PREDICTIONS[fold], Predictions_fold])
else:
self.PREDICTIONS[fold] = Predictions_fold
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'.csv', index=False)
class PredictionsMerge(Basics):
"""
Merges the predictions from all models into a unified dataframe.
"""
def __init__(self, target=None, fold=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.data_features = None
self.list_models = None
self.Predictions_df_previous = None
self.Predictions_df = None
def _load_data_features(self):
self.data_features = pd.read_csv(self.path_data + 'data-features_instances.csv',
usecols=self.id_vars + self.demographic_vars)
for var in self.id_vars:
self.data_features[var] = self.data_features[var].astype(str)
self.data_features.set_index('id', drop=False, inplace=True)
self.data_features.index.name = 'column_names'
def _preprocess_data_features(self):
# For the training set, each sample is predicted n_CV_outer_folds times, so prepare a larger dataframe
if self.fold == 'train':
df_all_folds = None
for outer_fold in self.outer_folds:
df_fold = self.data_features.copy()
df_all_folds = df_fold if outer_fold == self.outer_folds[0] else df_all_folds.append(df_fold)
self.data_features = df_all_folds
def _load_previous_merged_predictions(self):
if os.path.exists(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold +
'.csv'):
self.Predictions_df_previous = pd.read_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' +
self.target + '_' + self.fold + '.csv')
self.Predictions_df_previous.drop(columns=['eid', 'instance'] + self.demographic_vars, inplace=True)
def _list_models(self):
# generate list of predictions that will be integrated in the Predictions dataframe
self.list_models = glob.glob(self.path_data + 'Predictions_instances_' + self.target + '_*_' + self.fold +
'.csv')
# get rid of ensemble models and models already merged
self.list_models = [model for model in self.list_models if ('*' not in model)]
if self.Predictions_df_previous is not None:
self.list_models = \
[model for model in self.list_models
if ('pred_' + '_'.join(model.split('_')[2:-1]) not in self.Predictions_df_previous.columns)]
self.list_models.sort()
def preprocessing(self):
self._load_data_features()
self._preprocess_data_features()
self._load_previous_merged_predictions()
self._list_models()
def merge_predictions(self):
# merge the predictions
print('There are ' + str(len(self.list_models)) + ' models to merge.')
i = 0
# define subgroups to accelerate merging process
list_subgroups = list(set(['_'.join(model.split('_')[3:7]) for model in self.list_models]))
for subgroup in list_subgroups:
print('Merging models from the subgroup ' + subgroup)
models_subgroup = [model for model in self.list_models if subgroup in model]
Predictions_subgroup = None
# merge the models one by one
for file_name in models_subgroup:
i += 1
version = '_'.join(file_name.split('_')[2:-1])
if self.Predictions_df_previous is not None and \
'pred_' + version in self.Predictions_df_previous.columns:
print('The model ' + version + ' has already been merged before.')
else:
print('Merging the ' + str(i) + 'th model: ' + version)
# load csv and format the predictions
prediction = pd.read_csv(self.path_data + file_name)
print('raw prediction\'s shape: ' + str(prediction.shape))
for var in ['id', 'outer_fold']:
prediction[var] = prediction[var].apply(str)
prediction.rename(columns={'pred': 'pred_' + version}, inplace=True)
# merge data frames
if Predictions_subgroup is None:
Predictions_subgroup = prediction
elif self.fold == 'train':
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer',
on=['id', 'outer_fold'])
else:
prediction.drop(['outer_fold'], axis=1, inplace=True)
                        # not supported for pandas version > 0.23.4 for now
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer', on=['id'])
# merge group predictions data frames
if self.fold != 'train':
Predictions_subgroup.drop(['outer_fold'], axis=1, inplace=True)
if Predictions_subgroup is not None:
if self.Predictions_df is None:
self.Predictions_df = Predictions_subgroup
elif self.fold == 'train':
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer',
on=['id', 'outer_fold'])
else:
                    # not supported for pandas version > 0.23.4 for now
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer', on=['id'])
print('Predictions_df\'s shape: ' + str(self.Predictions_df.shape))
# garbage collector
gc.collect()
# Merge with the previously merged predictions
if (self.Predictions_df_previous is not None) & (self.Predictions_df is not None):
if self.fold == 'train':
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer',
on=['id', 'outer_fold'])
else:
self.Predictions_df.drop(columns=['outer_fold'], inplace=True)
                # not supported for pandas version > 0.23.4 for now
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer', on=['id'])
self.Predictions_df_previous = None
elif self.Predictions_df is None:
print('No new models to merge. Exiting.')
print('Done.')
sys.exit(0)
# Reorder the columns alphabetically
pred_versions = [col for col in self.Predictions_df.columns if 'pred_' in col]
pred_versions.sort()
id_cols = ['id', 'outer_fold'] if self.fold == 'train' else ['id']
self.Predictions_df = self.Predictions_df[id_cols + pred_versions]
def postprocessing(self):
# get rid of useless rows in data_features before merging to keep the memory requirements as low as possible
self.data_features = self.data_features[self.data_features['id'].isin(self.Predictions_df['id'].values)]
# merge data_features and predictions
if self.fold == 'train':
print('Starting to merge a massive dataframe')
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id', 'outer_fold'])
else:
            # not supported for pandas version > 0.23.4 for now
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id'])
print('Merging done')
# remove rows for which no prediction is available (should be none)
subset_cols = [col for col in self.Predictions_df.columns if 'pred_' in col]
self.Predictions_df.dropna(subset=subset_cols, how='all', inplace=True)
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_df.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_df[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def save_merged_predictions(self):
print('Writing the merged predictions...')
self.Predictions_df.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' +
self.fold + '.csv', index=False)
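# Illustrative sketch: computing one R2 per prediction column of a merged predictions dataframe,
# mirroring PredictionsMerge.postprocessing above. The column naming convention ('pred_' prefix) is
# the one used throughout this file; the target column name is hypothetical.
def _example_r2_per_model(predictions_df, target='Age'):
    import pandas as pd
    from sklearn.metrics import r2_score
    rows = []
    for col in [c for c in predictions_df.columns if c.startswith('pred_')]:
        df = predictions_df[[target, col]].dropna()
        rows.append({'version': col.replace('pred_', ''), 'R2': r2_score(df[target], df[col])})
    return pd.DataFrame(rows).sort_values(by='R2', ascending=False)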
class PredictionsEids(Basics):
"""
Computes the average age prediction across samples from different instances for every participant.
(Scaled back to instance 0)
"""
def __init__(self, target=None, fold=None, debug_mode=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.debug_mode = debug_mode
self.Predictions = None
self.Predictions_chunk = None
self.pred_versions = None
self.res_versions = None
self.target_0s = None
self.Predictions_eids = None
self.Predictions_eids_previous = None
self.pred_versions_previous = None
def preprocessing(self):
# Load predictions
self.Predictions = pd.read_csv(
self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold + '.csv')
self.Predictions.drop(columns=['id'], inplace=True)
self.Predictions['eid'] = self.Predictions['eid'].astype(str)
self.Predictions.index.name = 'column_names'
self.pred_versions = [col for col in self.Predictions.columns.values if 'pred_' in col]
# Prepare target values on instance 0 as a reference
target_0s = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', self.target])
target_0s['eid'] = target_0s['eid'].astype(str)
target_0s.set_index('eid', inplace=True)
target_0s = target_0s[self.target]
target_0s.name = 'target_0'
target_0s = target_0s[self.Predictions['eid'].unique()]
self.Predictions = self.Predictions.merge(target_0s, on='eid')
# Compute biological ages reported to target_0
for pred in self.pred_versions:
            # Compute the bias of the predictions as a function of age
print('Generating residuals for model ' + pred.replace('pred_', ''))
df_model = self.Predictions[['Age', pred]]
df_model.dropna(inplace=True)
if (len(df_model.index)) > 0:
age = df_model.loc[:, ['Age']]
res = df_model['Age'] - df_model[pred]
regr = LinearRegression()
regr.fit(age, res)
self.Predictions[pred.replace('pred_', 'correction_')] = regr.predict(self.Predictions[['Age']])
# Take the residuals bias into account when "translating" the prediction to instance 0
correction = self.Predictions['target_0'] - self.Predictions[self.target] + \
regr.predict(self.Predictions[['Age']]) - regr.predict(self.Predictions[['target_0']])
self.Predictions[pred] = self.Predictions[pred] + correction
self.Predictions[self.target] = self.Predictions['target_0']
self.Predictions.drop(columns=['target_0'], inplace=True)
self.Predictions.index.name = 'column_names'
def processing(self):
if self.fold == 'train':
# Prepare template to which each model will be appended
Predictions = self.Predictions[['eid'] + self.demographic_vars]
Predictions = Predictions.groupby('eid', as_index=True).mean()
Predictions.index.name = 'column_names'
Predictions['eid'] = Predictions.index.values
Predictions['instance'] = '*'
Predictions['id'] = Predictions['eid'] + '_*'
self.Predictions_eids = Predictions.copy()
self.Predictions_eids['outer_fold'] = -1
for i in range(self.n_CV_outer_folds):
Predictions_i = Predictions.copy()
Predictions_i['outer_fold'] = i
self.Predictions_eids = self.Predictions_eids.append(Predictions_i)
# Append each model one by one because the folds are different
print(str(len(self.pred_versions)) + ' models to compute.')
for pred_version in self.pred_versions:
if pred_version in self.pred_versions_previous:
print(pred_version.replace('pred_', '') + ' had already been computed.')
else:
print("Computing results for version " + pred_version.replace('pred_', ''))
Predictions_version = self.Predictions[['eid', pred_version, 'outer_fold']].copy()
# Use placeholder for NaN in outer_folds
Predictions_version.loc[Predictions_version['outer_fold'].isna(), 'outer_fold'] = -1
Predictions_version_eids = Predictions_version.groupby(['eid', 'outer_fold'], as_index=False).mean()
self.Predictions_eids = self.Predictions_eids.merge(Predictions_version_eids,
on=['eid', 'outer_fold'], how='outer')
of_version = pred_version.replace('pred_', 'outer_fold_')
self.Predictions_eids[of_version] = self.Predictions_eids['outer_fold']
self.Predictions_eids[of_version][self.Predictions_eids[of_version] == -1] = np.nan
del Predictions_version
_ = gc.collect()
self.Predictions_eids.drop(columns=['outer_fold'], inplace=True)
else:
self.Predictions_eids = self.Predictions.groupby('eid').mean()
self.Predictions_eids['eid'] = self.Predictions_eids.index.values
self.Predictions_eids['instance'] = '*'
self.Predictions_eids['id'] = self.Predictions_eids['eid'].astype(str) + '_' + \
self.Predictions_eids['instance']
# Re-order the columns
self.Predictions_eids = self.Predictions_eids[self.id_vars + self.demographic_vars + self.pred_versions]
def postprocessing(self):
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_eids.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_eids[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def _generate_single_model_predictions(self):
for pred_version in self.pred_versions:
path_save = \
self.path_data + 'Predictions_eids_' + '_'.join(pred_version.split('_')[1:]) + '_' + self.fold + '.csv'
# Generate only if does not exist already.
if not os.path.exists(path_save):
Predictions_version = self.Predictions_eids[['id', 'outer_fold', pred_version]]
Predictions_version.rename(columns={pred_version: 'pred'}, inplace=True)
Predictions_version.dropna(subset=['pred'], inplace=True)
Predictions_version.to_csv(path_save, index=False)
def save_predictions(self):
self.Predictions_eids.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_eids_' + self.target + '_' +
self.fold + '.csv', index=False)
# Generate and save files for every single model
self._generate_single_model_predictions()
class PerformancesGenerate(Metrics):
"""
Computes the performances for each model.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, fold=None, pred_type=None, debug_mode=False):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = n_fc_layers
self.n_fc_nodes = n_fc_nodes
self.optimizer = optimizer
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.data_augmentation_factor = data_augmentation_factor
self.fold = fold
self.pred_type = pred_type
if debug_mode:
self.n_bootstrap_iterations = 3
else:
self.n_bootstrap_iterations = 1000
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
self.names_metrics = self.dict_metrics_names[self.dict_prediction_types[target]]
self.data_features = None
self.Predictions = None
self.PERFORMANCES = None
def _preprocess_data_features_predictions_for_performances(self):
# load dataset
data_features = pd.read_csv(self.path_data + 'data-features_' + self.pred_type + '.csv',
usecols=['id', 'Sex', 'Age'])
# format data_features to extract y
data_features.rename(columns={self.target: 'y'}, inplace=True)
data_features = data_features[['id', 'y']]
data_features['id'] = data_features['id'].astype(str)
data_features.set_index('id', drop=False, inplace=True)
data_features.index.name = 'columns_names'
self.data_features = data_features
def _preprocess_predictions_for_performances(self):
Predictions = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' + self.version + '_' +
self.fold + '.csv')
Predictions['id'] = Predictions['id'].astype(str)
self.Predictions = Predictions.merge(self.data_features, how='inner', on=['id'])
# Initialize performances dataframes and compute sample sizes
def _initiate_empty_performances_df(self):
# Define an empty performances dataframe to store the performances computed
row_names = ['all'] + self.outer_folds
col_names_sample_sizes = ['N']
if self.target in self.targets_binary:
col_names_sample_sizes.extend(['N_0', 'N_1'])
col_names = ['outer_fold'] + col_names_sample_sizes
col_names.extend(self.names_metrics)
performances = np.empty((len(row_names), len(col_names),))
performances.fill(np.nan)
performances = pd.DataFrame(performances)
performances.index = row_names
performances.columns = col_names
performances['outer_fold'] = row_names
# Convert float to int for sample sizes and some metrics.
for col_name in col_names_sample_sizes:
# need recent version of pandas to use type below. Otherwise nan cannot be int
performances[col_name] = performances[col_name].astype('Int64')
# compute sample sizes for the data frame
performances.loc['all', 'N'] = len(self.Predictions.index)
if self.target in self.targets_binary:
performances.loc['all', 'N_0'] = len(self.Predictions.loc[self.Predictions['y'] == 0].index)
performances.loc['all', 'N_1'] = len(self.Predictions.loc[self.Predictions['y'] == 1].index)
for outer_fold in self.outer_folds:
performances.loc[outer_fold, 'N'] = len(
self.Predictions.loc[self.Predictions['outer_fold'] == int(outer_fold)].index)
if self.target in self.targets_binary:
performances.loc[outer_fold, 'N_0'] = len(
self.Predictions.loc[
(self.Predictions['outer_fold'] == int(outer_fold)) & (self.Predictions['y'] == 0)].index)
performances.loc[outer_fold, 'N_1'] = len(
self.Predictions.loc[
(self.Predictions['outer_fold'] == int(outer_fold)) & (self.Predictions['y'] == 1)].index)
# initialize the dataframes
self.PERFORMANCES = {}
for mode in self.modes:
self.PERFORMANCES[mode] = performances.copy()
# Convert float to int for sample sizes and some metrics.
for col_name in self.PERFORMANCES[''].columns.values:
if any(metric in col_name for metric in self.metrics_displayed_in_int):
# need recent version of pandas to use type below. Otherwise nan cannot be int
self.PERFORMANCES[''][col_name] = self.PERFORMANCES[''][col_name].astype('Int64')
def preprocessing(self):
self._preprocess_data_features_predictions_for_performances()
self._preprocess_predictions_for_performances()
self._initiate_empty_performances_df()
# Fill the columns for this model, outer_fold by outer_fold
def compute_performances(self):
# fill it outer_fold by outer_fold
for outer_fold in ['all'] + self.outer_folds:
print('Calculating the performances for the outer fold ' + outer_fold)
# Generate a subdataframe from the predictions table for each outerfold
if outer_fold == 'all':
predictions_fold = self.Predictions.copy()
else:
predictions_fold = self.Predictions.loc[self.Predictions['outer_fold'] == int(outer_fold), :]
# if no samples are available for this fold, fill columns with nans
if len(predictions_fold.index) == 0:
print('NO SAMPLES AVAILABLE FOR MODEL ' + self.version + ' IN OUTER_FOLD ' + outer_fold)
else:
# For binary classification, generate class prediction
if self.target in self.targets_binary:
predictions_fold_class = predictions_fold.copy()
predictions_fold_class['pred'] = predictions_fold_class['pred'].round()
else:
predictions_fold_class = None
# Fill the Performances dataframe metric by metric
for name_metric in self.names_metrics:
# print('Calculating the performance using the metric ' + name_metric)
if name_metric in self.metrics_needing_classpred:
predictions_metric = predictions_fold_class
else:
predictions_metric = predictions_fold
metric_function = self.dict_metrics_sklearn[name_metric]
self.PERFORMANCES[''].loc[outer_fold, name_metric] = metric_function(predictions_metric['y'],
predictions_metric['pred'])
self.PERFORMANCES['_sd'].loc[outer_fold, name_metric] = \
self._bootstrap(predictions_metric, metric_function)[1]
self.PERFORMANCES['_str'].loc[outer_fold, name_metric] = "{:.3f}".format(
self.PERFORMANCES[''].loc[outer_fold, name_metric]) + '+-' + "{:.3f}".format(
self.PERFORMANCES['_sd'].loc[outer_fold, name_metric])
# calculate the fold sd (variance between the metrics values obtained on the different folds)
folds_sd = self.PERFORMANCES[''].iloc[1:, :].std(axis=0)
for name_metric in self.names_metrics:
self.PERFORMANCES['_str'].loc['all', name_metric] = "{:.3f}".format(
self.PERFORMANCES[''].loc['all', name_metric]) + '+-' + "{:.3f}".format(
folds_sd[name_metric]) + '+-' + "{:.3f}".format(self.PERFORMANCES['_sd'].loc['all', name_metric])
# print the performances
print('Performances for model ' + self.version + ': ')
print(self.PERFORMANCES['_str'])
def save_performances(self):
for mode in self.modes:
path_save = self.path_data + 'Performances_' + self.pred_type + '_' + self.version + '_' + self.fold + \
mode + '.csv'
self.PERFORMANCES[mode].to_csv(path_save, index=False)
class PerformancesMerge(Metrics):
"""
Merges the performances of the different models into a unified dataframe.
"""
def __init__(self, target=None, fold=None, pred_type=None, ensemble_models=None):
# Parameters
Metrics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.ensemble_models = self.convert_string_to_boolean(ensemble_models)
self.names_metrics = self.dict_metrics_names[self.dict_prediction_types[target]]
self.main_metric_name = self.dict_main_metrics_names[target]
# list the models that need to be merged
self.list_models = glob.glob(self.path_data + 'Performances_' + pred_type + '_' + target + '_*_' + fold +
'_str.csv')
# get rid of ensemble models
if self.ensemble_models:
self.list_models = [model for model in self.list_models if '*' in model]
else:
self.list_models = [model for model in self.list_models if '*' not in model]
self.Performances = None
self.Performances_alphabetical = None
self.Performances_ranked = None
def _initiate_empty_performances_summary_df(self):
# Define the columns of the Performances dataframe
# columns for sample sizes
names_sample_sizes = ['N']
if self.target in self.targets_binary:
names_sample_sizes.extend(['N_0', 'N_1'])
# columns for metrics
names_metrics = self.dict_metrics_names[self.dict_prediction_types[self.target]]
# for normal folds, keep track of metric and bootstrapped metric's sd
names_metrics_with_sd = []
for name_metric in names_metrics:
names_metrics_with_sd.extend([name_metric, name_metric + '_sd', name_metric + '_str'])
# for the 'all' fold, also keep track of the 'folds_sd' (metric's sd calculated using the folds' results)
names_metrics_with_folds_sd_and_sd = []
for name_metric in names_metrics:
names_metrics_with_folds_sd_and_sd.extend(
[name_metric, name_metric + '_folds_sd', name_metric + '_sd', name_metric + '_str'])
# merge all the columns together. First description of the model, then sample sizes and metrics for each fold
names_col_Performances = ['version'] + self.names_model_parameters
# special outer fold 'all'
names_col_Performances.extend(
['_'.join([name, 'all']) for name in names_sample_sizes + names_metrics_with_folds_sd_and_sd])
# other outer_folds
for outer_fold in self.outer_folds:
names_col_Performances.extend(
['_'.join([name, outer_fold]) for name in names_sample_sizes + names_metrics_with_sd])
# Generate the empty Performance table from the rows and columns.
Performances = np.empty((len(self.list_models), len(names_col_Performances),))
Performances.fill(np.nan)
Performances = pd.DataFrame(Performances)
Performances.columns = names_col_Performances
# Format the types of the columns
for colname in Performances.columns.values:
if (colname in ['version'] + self.names_model_parameters) | ('_str' in colname):
col_type = str
else:
col_type = float
Performances[colname] = Performances[colname].astype(col_type)
self.Performances = Performances
def merge_performances(self):
# define parameters
names_metrics = self.dict_metrics_names[self.dict_prediction_types[self.target]]
# initiate dataframe
self._initiate_empty_performances_summary_df()
# Fill the Performance table row by row
for i, model in enumerate(self.list_models):
# load the performances subdataframe
PERFORMANCES = {}
for mode in self.modes:
PERFORMANCES[mode] = pd.read_csv(model.replace('_str', mode))
PERFORMANCES[mode].set_index('outer_fold', drop=False, inplace=True)
# Fill the columns corresponding to the model's parameters
version = '_'.join(model.split('_')[2:-2])
parameters = self._version_to_parameters(version)
# fill the columns for model parameters
self.Performances['version'][i] = version
for parameter_name in self.names_model_parameters:
self.Performances[parameter_name][i] = parameters[parameter_name]
# Fill the columns for this model, outer_fold by outer_fold
for outer_fold in ['all'] + self.outer_folds:
# Fill sample size columns
self.Performances['N_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N']
# For binary classification, calculate sample sizes for each class and generate class prediction
if self.target in self.targets_binary:
self.Performances['N_0_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N_0']
self.Performances['N_1_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N_1']
# Fill the Performances dataframe metric by metric
for name_metric in names_metrics:
for mode in self.modes:
self.Performances[name_metric + mode + '_' + outer_fold][i] = PERFORMANCES[mode].loc[
outer_fold, name_metric]
# calculate the fold sd (variance between the metrics values obtained on the different folds)
folds_sd = PERFORMANCES[''].iloc[1:, :].std(axis=0)
for name_metric in names_metrics:
self.Performances[name_metric + '_folds_sd_all'][i] = folds_sd[name_metric]
# Convert float to int for sample sizes and some metrics.
for name_col in self.Performances.columns.values:
cond1 = name_col.startswith('N_')
cond2 = any(metric in name_col for metric in self.metrics_displayed_in_int)
cond3 = '_sd' not in name_col
cond4 = '_str' not in name_col
if (cond1 | cond2) & cond3 & cond4:
self.Performances[name_col] = self.Performances[name_col].astype('Int64')
# need recent version of pandas to use this type. Otherwise nan cannot be int
# For ensemble models, merge the new performances with the previously computed performances
if self.ensemble_models:
Performances_withoutEnsembles = pd.read_csv(self.path_data + 'PERFORMANCES_tuned_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
self.Performances = Performances_withoutEnsembles.append(self.Performances)
# reorder the columns (append can silently re-order them alphabetically, as observed for the 'val' fold)
self.Performances = self.Performances[Performances_withoutEnsembles.columns]
# Ranking, printing and saving
self.Performances_alphabetical = self.Performances.sort_values(by='version')
cols_to_print = ['version', self.main_metric_name + '_str_all']
print('Performances of the models ranked by models\'names:')
print(self.Performances_alphabetical[cols_to_print])
sort_by = self.dict_main_metrics_names[self.target] + '_all'
sort_ascending = self.main_metrics_modes[self.dict_main_metrics_names[self.target]] == 'min'
self.Performances_ranked = self.Performances.sort_values(by=sort_by, ascending=sort_ascending)
print('Performances of the models ranked by the performance on the main metric on all the samples:')
print(self.Performances_ranked[cols_to_print])
def save_performances(self):
name_extension = 'withEnsembles' if self.ensemble_models else 'withoutEnsembles'
path = self.path_data + 'PERFORMANCES_' + name_extension + '_alphabetical_' + self.pred_type + '_' + \
self.target + '_' + self.fold + '.csv'
self.Performances_alphabetical.to_csv(path, index=False)
self.Performances_ranked.to_csv(path.replace('_alphabetical_', '_ranked_'), index=False)
class PerformancesTuning(Metrics):
"""
For each model, selects the best hyperparameter combination.
"""
def __init__(self, target=None, pred_type=None):
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.PERFORMANCES = {}
self.PREDICTIONS = {}
self.Performances = None
self.models = None
self.folds = ['val', 'test']
def load_data(self):
for fold in self.folds:
path = self.path_data + 'PERFORMANCES_withoutEnsembles_ranked_' + self.pred_type + '_' + self.target + \
'_' + fold + '.csv'
self.PERFORMANCES[fold] = pd.read_csv(path).set_index('version', drop=False)
self.PERFORMANCES[fold]['organ'] = self.PERFORMANCES[fold]['organ'].astype(str)
self.PERFORMANCES[fold].index.name = 'columns_names'
self.PREDICTIONS[fold] = pd.read_csv(path.replace('PERFORMANCES', 'PREDICTIONS').replace('_ranked', ''))
def preprocess_data(self):
# Get list of distinct models without taking into account hyperparameters tuning
self.Performances = self.PERFORMANCES['val']
self.Performances['model'] = self.Performances['organ'] + '_' + self.Performances['view'] + '_' + \
self.Performances['transformation'] + '_' + self.Performances['architecture']
self.models = self.Performances['model'].unique()
def select_models(self):
main_metric_name = self.dict_main_metrics_names[self.target]
main_metric_mode = self.main_metrics_modes[main_metric_name]
Perf_col_name = main_metric_name + '_all'
for model in self.models:
Performances_model = self.Performances[self.Performances['model'] == model]
Performances_model.sort_values([Perf_col_name, 'n_fc_layers', 'n_fc_nodes', 'learning_rate', 'dropout_rate',
'weight_decay', 'data_augmentation_factor'],
ascending=[main_metric_mode == 'min', True, True, False, False, False,
False], inplace=True)
# the dataframe was sorted just above so that the best-performing version comes first for either metric mode
best_version = Performances_model['version'].values[0]
versions_to_drop = [version for version in Performances_model['version'].values if
not version == best_version]
# define columns from predictions to drop
cols_to_drop = ['pred_' + version for version in versions_to_drop] + ['outer_fold_' + version for version in
versions_to_drop]
for fold in self.folds:
self.PERFORMANCES[fold].drop(versions_to_drop, inplace=True)
self.PREDICTIONS[fold].drop(cols_to_drop, axis=1, inplace=True)
# drop 'model' column
self.Performances.drop(['model'], axis=1, inplace=True)
# Display results
for fold in self.folds:
print('The tuned ' + fold + ' performances are:')
print(self.PERFORMANCES[fold])
def save_data(self):
# Save the files
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_tuned_' + self.pred_type + '_' + self.target + '_' + fold + \
'.csv'
path_perf = self.path_data + 'PERFORMANCES_tuned_ranked_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
self.PREDICTIONS[fold].to_csv(path_pred, index=False)
self.PERFORMANCES[fold].to_csv(path_perf, index=False)
Performances_alphabetical = self.PERFORMANCES[fold].sort_values(by='version')
Performances_alphabetical.to_csv(path_perf.replace('ranked', 'alphabetical'), index=False)
# This class was coded by <NAME>.
class InnerCV:
"""
Helper class to perform an inner cross validation to tune the hyperparameters of models trained on scalar predictors
"""
def __init__(self, models, inner_splits, n_iter):
self.inner_splits = inner_splits
self.n_iter = n_iter
if isinstance(models, str):
models = [models]
self.models = models
@staticmethod
def get_model(model_name, params):
if model_name == 'ElasticNet':
return ElasticNet(max_iter=2000, **params)
elif model_name == 'RandomForest':
return RandomForestRegressor(**params)
elif model_name == 'GradientBoosting':
return GradientBoostingRegressor(**params)
elif model_name == 'Xgboost':
return XGBRegressor(**params)
elif model_name == 'LightGbm':
return LGBMRegressor(**params)
elif model_name == 'NeuralNetwork':
return MLPRegressor(solver='adam',
activation='relu',
hidden_layer_sizes=(128, 64, 32),
batch_size=1000,
early_stopping=True, **params)
@staticmethod
def get_hyper_distribution(model_name):
if model_name == 'ElasticNet':
return {
'alpha': hp.loguniform('alpha', low=np.log(0.01), high=np.log(10)),
'l1_ratio': hp.uniform('l1_ratio', low=0.01, high=0.99)
}
elif model_name == 'RandomForest':
return {
'n_estimators': hp.randint('n_estimators', upper=300) + 150,
'max_features': hp.choice('max_features', ['auto', 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]),
'max_depth': hp.choice('max_depth', [None, 10, 8, 6])
}
elif model_name == 'GradientBoosting':
return {
'n_estimators': hp.randint('n_estimators', upper=300) + 150,
'max_features': hp.choice('max_features', ['auto', 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]),
'learning_rate': hp.uniform('learning_rate', low=0.01, high=0.3),
'max_depth': hp.randint('max_depth', 10) + 5
}
elif model_name == 'Xgboost':
return {
'colsample_bytree': hp.uniform('colsample_bytree', low=0.2, high=0.7),
'gamma': hp.uniform('gamma', low=0.1, high=0.5),
'learning_rate': hp.uniform('learning_rate', low=0.02, high=0.2),
'max_depth': hp.randint('max_depth', 10) + 5,
'n_estimators': hp.randint('n_estimators', 300) + 150,
'subsample': hp.uniform('subsample', 0.2, 0.8)
}
elif model_name == 'LightGbm':
return {
'num_leaves': hp.randint('num_leaves', 40) + 5,
'min_child_samples': hp.randint('min_child_samples', 400) + 100,
'min_child_weight': hp.choice('min_child_weight', [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4]),
'subsample': hp.uniform('subsample', low=0.2, high=0.8),
'colsample_bytree': hp.uniform('colsample_bytree', low=0.4, high=0.6),
'reg_alpha': hp.choice('reg_alpha', [0, 1e-1, 1, 2, 5, 7, 10, 50, 100]),
'reg_lambda': hp.choice('reg_lambda', [0, 1e-1, 1, 5, 10, 20, 50, 100]),
'n_estimators': hp.randint('n_estimators', 300) + 150
}
elif model_name == 'NeuralNetwork':
return {
'learning_rate_init': hp.loguniform('learning_rate_init', low=np.log(5e-5), high=np.log(2e-2)),
'alpha': hp.uniform('alpha', low=1e-6, high=1e3)
}
def create_folds(self, X, y):
"""
X columns : eid + features except target
y columns : eid + target
"""
X_eid = X.drop_duplicates('eid')
y_eid = y.drop_duplicates('eid')
eids = X_eid.eid
# Kfold on the eid, then regroup all ids
# shuffle=False already yields deterministic folds; passing a random_state as well is rejected by newer sklearn
inner_cv = KFold(n_splits=self.inner_splits, shuffle=False)
list_test_folds = [elem[1] for elem in inner_cv.split(X_eid, y_eid)]
list_test_folds_eid = [eids[elem].values for elem in list_test_folds]
list_test_folds_id = [X.index[X.eid.isin(list_test_folds_eid[elem])].values for elem in
range(len(list_test_folds_eid))]
return list_test_folds_id
def optimize_hyperparameters(self, X, y, scoring):
"""
input X : dataframe with features + eid
input y : dataframe with target + eid
"""
if 'instance' in X.columns:
X = X.drop(columns=['instance'])
if 'instance' in y.columns:
y = y.drop(columns=['instance'])
list_test_folds_id = self.create_folds(X, y)
X = X.drop(columns=['eid'])
y = y.drop(columns=['eid'])
# Create custom Splits
list_test_folds_id_index = [np.array([X.index.get_loc(elem) for elem in list_test_folds_id[fold_num]])
for fold_num in range(len(list_test_folds_id))]
test_folds = np.zeros(len(X), dtype='int')
for fold_count in range(len(list_test_folds_id)):
test_folds[list_test_folds_id_index[fold_count]] = fold_count
inner_cv = PredefinedSplit(test_fold=test_folds)
list_best_params = {}
list_best_score = {}
objective, model_name = None, None
for model_name in self.models:
def objective(hyperparameters):
estimator_ = self.get_model(model_name, hyperparameters)
pipeline = Pipeline([('scaler', StandardScaler()), ('estimator', estimator_)])
scores = cross_validate(pipeline, X.values, y, scoring=scoring, cv=inner_cv, n_jobs=self.inner_splits)
return {'status': STATUS_OK, 'loss': -scores['test_score'].mean(),
'attachments': {'split_test_scores_and_params': (scores['test_score'], hyperparameters)}}
space = self.get_hyper_distribution(model_name)
trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=self.n_iter, trials=trials)
best_params = space_eval(space, best)
list_best_params[model_name] = best_params
list_best_score[model_name] = - min(trials.losses())
# Recover best between all models :
best_model = max(list_best_score.keys(), key=(lambda k: list_best_score[k]))
best_model_hyp = list_best_params[best_model]
# Recreate best estim :
estim = self.get_model(best_model, best_model_hyp)
pipeline_best = Pipeline([('scaler', StandardScaler()), ('estimator', estim)])
pipeline_best.fit(X.values, y)
return pipeline_best
"""
Useful for EnsemblesPredictions. This function needs to be global to allow pool to pickle it.
"""
def compute_ensemble_folds(ensemble_inputs):
if len(ensemble_inputs[1]) < 100:
print('Small sample size:' + str(len(ensemble_inputs[1])))
n_inner_splits = 5
else:
n_inner_splits = 10
# Can use different models: models=['ElasticNet', 'LightGbm', 'NeuralNetwork'] (keys as defined in InnerCV.get_model)
cv = InnerCV(models=['ElasticNet'], inner_splits=n_inner_splits, n_iter=30)
model = cv.optimize_hyperparameters(ensemble_inputs[0], ensemble_inputs[1], scoring='r2')
return model
class EnsemblesPredictions(Metrics):
"""
Hierarchically builds ensemble models from the already existing predictions.
"""
def __init__(self, target=None, pred_type=None, regenerate_models=False):
# Parameters
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.folds = ['val', 'test']
self.regenerate_models = regenerate_models
self.ensembles_performance_cutoff_percent = 0.5
self.parameters = {'target': target, 'organ': '*', 'view': '*', 'transformation': '*', 'architecture': '*',
'n_fc_layers': '*', 'n_fc_nodes': '*', 'optimizer': '*', 'learning_rate': '*',
'weight_decay': '*', 'dropout_rate': '*', 'data_augmentation_factor': '*'}
self.version = self._parameters_to_version(self.parameters)
self.main_metric_name = self.dict_main_metrics_names[target]
self.init_perf = -np.Inf if self.main_metrics_modes[self.main_metric_name] == 'max' else np.Inf
path_perf = self.path_data + 'PERFORMANCES_tuned_ranked_' + pred_type + '_' + target + '_val.csv'
self.Performances = pd.read_csv(path_perf).set_index('version', drop=False)
self.Performances['organ'] = self.Performances['organ'].astype(str)
self.list_ensemble_levels = ['transformation', 'view', 'organ']
self.PREDICTIONS = {}
self.weights_by_category = None
self.weights_by_ensembles = None
self.N_ensemble_CV_split = 10
self.instancesS = {'instances': ['01', '1.5x', '23'], 'eids': ['*']}
self.instances_names_to_numbers = {'01': ['0', '1'], '1.5x': ['1.5', '1.51', '1.52', '1.53', '1.54'],
'23': ['2', '3'], '*': ['*']}
self.INSTANCES_DATASETS = {
'01': ['Eyes', 'Hearing', 'Lungs', 'Arterial', 'Musculoskeletal', 'Biochemistry', 'ImmuneSystem'],
'1.5x': ['PhysicalActivity'],
'23': ['Brain', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal'],
'*': ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity', 'ImmuneSystem']
}
# Get rid of columns and rows for the versions for which all samples as NANs
@staticmethod
def _drop_na_pred_versions(PREDS, Performances):
# Select the versions for which only NAs are available
pred_versions = [col for col in PREDS['val'].columns.values if 'pred_' in col]
to_drop = []
for pv in pred_versions:
for fold in PREDS.keys():
if PREDS[fold][pv].notna().sum() == 0:
to_drop.append(pv)
break
# Drop the corresponding columns from preds, and rows from performances
index_to_drop = [p.replace('pred_', '') for p in to_drop if '*' not in p]
for fold in PREDS.keys():
PREDS[fold].drop(to_drop, axis=1, inplace=True)
return Performances.drop(index_to_drop)
def load_data(self):
for fold in self.folds:
self.PREDICTIONS[fold] = pd.read_csv(
self.path_data + 'PREDICTIONS_tuned_' + self.pred_type + '_' + self.target + '_' + fold + '.csv')
def _build_single_ensemble(self, PREDICTIONS, version):
# Drop columns that are exclusively NaNs
all_nan = PREDICTIONS['val'].isna().all() | PREDICTIONS['test'].isna().all()
non_nan_cols = all_nan[~all_nan.values].index
for fold in self.folds:
PREDICTIONS[fold] = PREDICTIONS[fold][non_nan_cols]
Predictions = PREDICTIONS['val']
# Select the columns for the model
ensemble_preds_cols = [col for col in Predictions.columns.values if
bool(re.compile('pred_' + version).match(col))]
# If only one model in the ensemble, just copy the column. Otherwise build the ensemble model
if len(ensemble_preds_cols) == 1:
for fold in self.folds:
PREDICTIONS[fold]['pred_' + version] = PREDICTIONS[fold][ensemble_preds_cols[0]]
else:
# Initiate the dictionaries
PREDICTIONS_OUTERFOLDS = {}
ENSEMBLE_INPUTS = {}
for outer_fold in self.outer_folds:
# take the subset of the rows that correspond to the outer_fold
PREDICTIONS_OUTERFOLDS[outer_fold] = {}
XS_outer_fold = {}
YS_outer_fold = {}
dict_fold_to_outer_folds = {
'val': [float(outer_fold)],
'test': [(float(outer_fold) + 1) % self.n_CV_outer_folds],
'train': [float(of) for of in self.outer_folds
if float(of) not in [float(outer_fold), (float(outer_fold) + 1) % self.n_CV_outer_folds]]
}
for fold in self.folds:
PREDICTIONS_OUTERFOLDS[outer_fold][fold] = \
PREDICTIONS[fold][PREDICTIONS[fold]['outer_fold'].isin(dict_fold_to_outer_folds[fold])]
PREDICTIONS_OUTERFOLDS[outer_fold][fold] = PREDICTIONS_OUTERFOLDS[outer_fold][fold][
['id', 'eid', 'instance', self.target] + ensemble_preds_cols].dropna()
X = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'eid', 'instance'] + ensemble_preds_cols]
X.set_index('id', inplace=True)
XS_outer_fold[fold] = X
y = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'eid', self.target]]
y.set_index('id', inplace=True)
YS_outer_fold[fold] = y
ENSEMBLE_INPUTS[outer_fold] = [XS_outer_fold['val'], YS_outer_fold['val']]
# Build ensemble model using ElasticNet and/or LightGBM, Neural Network.
PREDICTIONS_ENSEMBLE = {}
pool = Pool(self.N_ensemble_CV_split)
MODELS = pool.map(compute_ensemble_folds, list(ENSEMBLE_INPUTS.values()))
pool.close()
pool.join()
# Concatenate all outer folds
for outer_fold in self.outer_folds:
for fold in self.folds:
X = PREDICTIONS_OUTERFOLDS[outer_fold][fold][ensemble_preds_cols]
PREDICTIONS_OUTERFOLDS[outer_fold][fold]['pred_' + version] = MODELS[int(outer_fold)].predict(X)
PREDICTIONS_OUTERFOLDS[outer_fold][fold]['outer_fold'] = float(outer_fold)
df_outer_fold = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'outer_fold',
'pred_' + version]]
# Initiate, or append if some previous outerfolds have already been concatenated
if fold not in PREDICTIONS_ENSEMBLE.keys():
PREDICTIONS_ENSEMBLE[fold] = df_outer_fold
else:
PREDICTIONS_ENSEMBLE[fold] = PREDICTIONS_ENSEMBLE[fold].append(df_outer_fold)
# Add the ensemble predictions to the dataframe
for fold in self.folds:
if fold == 'train':
PREDICTIONS[fold] = PREDICTIONS[fold].merge(PREDICTIONS_ENSEMBLE[fold], how='outer',
on=['id', 'outer_fold'])
else:
PREDICTIONS_ENSEMBLE[fold].drop('outer_fold', axis=1, inplace=True)
PREDICTIONS[fold] = PREDICTIONS[fold].merge(PREDICTIONS_ENSEMBLE[fold], how='outer', on=['id'])
def _build_single_ensemble_wrapper(self, version, ensemble_level):
print('Building the ensemble model ' + version)
pred_version = 'pred_' + version
# Evaluate if the ensemble model should be built
# 1 - separately on instance 0-1, 1.5 and 2-3 (for ensemble at the top level, since overlap between models is 0)
# 2 - piece by piece on each outer_fold
# 1-Compute instances 0-1, 1.5 and 2-3 separately
if ensemble_level == 'organ':
for fold in self.folds:
self.PREDICTIONS[fold][pred_version] = np.nan
# Add an ensemble for each instances (01, 1.5x, and 23)
if self.pred_type == 'instances':
for instances_names in self.instancesS[self.pred_type]:
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
self.PREDICTIONS[fold][pv] = np.nan
for instances_names in self.instancesS[self.pred_type]:
print('Building final ensemble model for samples in the instances: ' + instances_names)
# Take subset of rows and columns
instances = self.instances_names_to_numbers[instances_names]
instances_datasets = self.INSTANCES_DATASETS[instances_names]
versions = \
[col.replace('pred_', '') for col in self.PREDICTIONS['val'].columns if 'pred_' in col]
instances_versions = [version for version in versions
if any(dataset in version for dataset in instances_datasets)]
cols_to_keep = self.id_vars + self.demographic_vars + \
['pred_' + version for version in instances_versions]
PREDICTIONS = {}
for fold in self.folds:
PREDICTIONS[fold] = self.PREDICTIONS[fold][self.PREDICTIONS[fold].instance.isin(instances)]
PREDICTIONS[fold] = PREDICTIONS[fold][cols_to_keep]
self._build_single_ensemble(PREDICTIONS, version)
# Print a quick performance estimation for each instance(s)
df_model = PREDICTIONS['test'][[self.target, 'pred_' + version]].dropna()
print(instances_names)
print(self.main_metric_name + ' for instance(s) ' + instances_names + ': ' +
str(r2_score(df_model[self.target], df_model['pred_' + version])))
print('The sample size is ' + str(len(df_model.index)) + '.')
# Add the predictions to the dataframe, chunk by chunk, instance by instance
for fold in self.folds:
self.PREDICTIONS[fold][pred_version][self.PREDICTIONS[fold].instance.isin(instances)] = \
PREDICTIONS[fold][pred_version].values
# Add an ensemble for the instance(s) only
if self.pred_type == 'instances':
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
self.PREDICTIONS[fold][pv][self.PREDICTIONS[fold].instance.isin(instances)] = \
PREDICTIONS[fold][pred_version].values
# Add three extra ensemble models for eids, to allow larger sample sizes for GWAS purposes
if self.pred_type == 'eids':
for instances_names in ['01', '1.5x', '23']:
print('Building final sub-ensemble model for samples in the instances: ' + instances_names)
# Keep only relevant columns
instances_datasets = self.INSTANCES_DATASETS[instances_names]
versions = \
[col.replace('pred_', '') for col in self.PREDICTIONS['val'].columns if 'pred_' in col]
instances_versions = [version for version in versions
if any(dataset in version for dataset in instances_datasets)]
cols_to_keep = self.id_vars + self.demographic_vars + \
['pred_' + version for version in instances_versions]
PREDICTIONS = {}
for fold in self.folds:
PREDICTIONS[fold] = self.PREDICTIONS[fold].copy()
PREDICTIONS[fold] = PREDICTIONS[fold][cols_to_keep]
self._build_single_ensemble(PREDICTIONS, version)
# Print a quick performance estimation for each instance(s)
df_model = PREDICTIONS['test'][[self.target, 'pred_' + version]].dropna()
print(instances_names)
print(self.main_metric_name + ' for instance(s) ' + instances_names + ': ' +
str(r2_score(df_model[self.target], df_model['pred_' + version])))
print('The sample size is ' + str(len(df_model.index)) + '.')
# Add the predictions to the dataframe
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
for fold in self.folds:
self.PREDICTIONS[fold][pv] = PREDICTIONS[fold][pred_version].values
# 2-Compute fold by fold
else:
self._build_single_ensemble(self.PREDICTIONS, version)
# build and save a dataset for this specific ensemble model
for fold in self.folds:
df_single_ensemble = self.PREDICTIONS[fold][['id', 'outer_fold', pred_version]]
df_single_ensemble.rename(columns={pred_version: 'pred'}, inplace=True)
df_single_ensemble.dropna(inplace=True, subset=['pred'])
df_single_ensemble.to_csv(self.path_data + 'Predictions_' + self.pred_type + '_' + version + '_' + fold +
'.csv', index=False)
# Add extra ensembles at organ level
if ensemble_level == 'organ':
for instances_names in ['01', '1.5x', '23']:
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
version_instances = version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
df_single_ensemble = self.PREDICTIONS[fold][['id', 'outer_fold', pv]]
df_single_ensemble.rename(columns={pv: 'pred'}, inplace=True)
df_single_ensemble.dropna(inplace=True, subset=['pred'])
df_single_ensemble.to_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_instances + '_' + fold + '.csv', index=False)
def _recursive_ensemble_builder(self, Performances_grandparent, parameters_parent, version_parent,
list_ensemble_levels_parent):
# Compute the ensemble models for the children first, so that they can be used for the parent model
Performances_parent = Performances_grandparent[
Performances_grandparent['version'].isin(
fnmatch.filter(Performances_grandparent['version'], version_parent))]
# if the last ensemble level has not been reached, go down one level and create a branch for each child.
# Otherwise the leaf has been reached
if len(list_ensemble_levels_parent) > 0:
list_ensemble_levels_child = list_ensemble_levels_parent.copy()
ensemble_level = list_ensemble_levels_child.pop()
list_children = Performances_parent[ensemble_level].unique()
for child in list_children:
parameters_child = parameters_parent.copy()
parameters_child[ensemble_level] = child
version_child = self._parameters_to_version(parameters_child)
# recursive call to the function
self._recursive_ensemble_builder(Performances_parent, parameters_child, version_child,
list_ensemble_levels_child)
else:
ensemble_level = None
# compute the ensemble model for the parent
# Check if ensemble model has already been computed. If it has, load the predictions. If it has not, compute it.
if not self.regenerate_models and \
os.path.exists(self.path_data + 'Predictions_' + self.pred_type + '_' + version_parent + '_test.csv'):
print('The model ' + version_parent + ' has already been computed. Loading it...')
for fold in self.folds:
df_single_ensemble = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_parent + '_' + fold + '.csv')
df_single_ensemble.rename(columns={'pred': 'pred_' + version_parent}, inplace=True)
# Add the ensemble predictions to the dataframe
if fold == 'train':
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id', 'outer_fold'])
else:
df_single_ensemble.drop(columns=['outer_fold'], inplace=True)
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer', on=['id'])
# Add the extra ensemble models at the 'organ' level
if ensemble_level == 'organ':
if self.pred_type == 'instances':
instances = self.instancesS[self.pred_type]
else:
instances = ['01', '23']
for instances_names in instances:
pv = 'pred_' + version_parent.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version_parent.split('_')[2:])
version_instances = version_parent.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version_parent.split('_')[2:])
df_single_ensemble = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_instances + '_' + fold + '.csv')
df_single_ensemble.rename(columns={'pred': pv}, inplace=True)
if fold == 'train':
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id', 'outer_fold'])
else:
df_single_ensemble.drop(columns=['outer_fold'], inplace=True)
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id'])
else:
self._build_single_ensemble_wrapper(version_parent, ensemble_level)
# Print a quick performance estimation
df_model = self.PREDICTIONS['test'][[self.target, 'pred_' + version_parent]].dropna()
print(self.main_metric_name + ': ' + str(r2_score(df_model[self.target], df_model['pred_' + version_parent])))
print('The sample size is ' + str(len(df_model.index)) + '.')
def generate_ensemble_predictions(self):
self._recursive_ensemble_builder(self.Performances, self.parameters, self.version, self.list_ensemble_levels)
# Reorder the columns alphabetically
for fold in self.folds:
pred_versions = [col for col in self.PREDICTIONS[fold].columns if 'pred_' in col]
pred_versions.sort()
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][self.id_vars + self.demographic_vars + pred_versions]
# Displaying the R2s
for fold in self.folds:
versions = [col.replace('pred_', '') for col in self.PREDICTIONS[fold].columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.PREDICTIONS[fold][[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = | pd.DataFrame({'version': versions, 'R2': r2s}) | pandas.DataFrame |
import time
from collections import defaultdict
from datetime import timedelta
import cvxpy as cp
import empiricalutilities as eu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transfer_entropy import TransferEntropy
plt.style.use('fivethirtyeight')
# %%
eqs = 'SPY XLK XLV XLF IYZ XLY XLP XLI XLE XLU IYR XLB'\
' DIA IWM ECH EWW EWC EWZ'.split()
fi = 'AGG SHY IEI IEF TLT TIP LQD HYG MBB'.split()
cmdtys = 'GLD SLV DBA DBC USO UNG'.split()
assets = eqs + fi + cmdtys
def cum_rets(rets):
cum_rets = []
cum_rets.append(1)
for i, ret in enumerate(rets):
cum_rets.append(cum_rets[i]*(1+ret))
return cum_rets
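# Example: cum_rets([0.01, -0.02, 0.03]) -> approximately [1, 1.01, 0.9898, 1.0195];
# the path starts at 1 and has len(rets) + 1 points.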
# %%
q = 4
res = {}
ete_mats = {}
mod = TransferEntropy(assets=assets)
months = mod.prices.index.to_period('M').unique().to_timestamp()
iters = len(months)-1
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
mod.compute_effective_transfer_entropy(sims=5)
ete = mod.ete.copy()
ete_out = ete.sum(axis=0)
returns = mod.prices.iloc[-1]/mod.prices.iloc[0]-1
vols = mod.data.std()
df = | pd.DataFrame({'returns': returns, 'vol': vols, 'ete': ete_out}) | pandas.DataFrame |
#Copyright 2021 <NAME>, <NAME>, <NAME>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import pandas as pd
import numpy as np
import os
from pandas.api.types import is_numeric_dtype
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import joblib
from .model_loading import decompress_pickle
from typing import List, Tuple, Union
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources')
rf_Filename = os.path.join(RESOURCES_DIR, "compressed_rf.pbz2")
Pickled_LR_Model = decompress_pickle(filename=rf_Filename)
del_pattern = r'([^,;\|]+[,;\|]{1}[^,;\|]+){1,}'
del_reg = re.compile(del_pattern)
delimeters = r"(,|;|\|)"
delimeters = re.compile(delimeters)
url_pat = r"(http|ftp|https):\/\/([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?"
url_reg = re.compile(url_pat)
email_pat = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b"
email_reg = re.compile(email_pat)
stop_words = set(stopwords.words('english'))
class_map = {
0: 'numeric',
1: 'categorical',
2: 'datetime',
3: 'sentence',
4: 'url',
5: 'embedded-number',
6: 'list',
7: 'not-generalizable',
8: 'context-specific'
}
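# class_map presumably translates the integer classes predicted by the pickled model back into
# human-readable feature types, e.g. class_map[int(Pickled_LR_Model.predict(features)[0])]
# (illustrative only; 'features' stands for the engineered per-column feature vector).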
### Private Functions ###
def summary_stats(dat, key_s):
b_data = []
for col in key_s:
nans = np.count_nonzero(pd.isnull(dat[col]))
dist_val = len(pd.unique(dat[col].dropna()))
Total_val = len(dat[col])
mean = 0
std_dev = 0
var = 0
min_val = 0
max_val = 0
if | is_numeric_dtype(dat[col]) | pandas.api.types.is_numeric_dtype |
########################################################
# <NAME> - drigols #
# Last update: 21/09/2021 #
########################################################
def OLS(dic):
import pandas as pd
df = | pd.DataFrame(dic) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pyopenms import FeatureMap, FeatureXMLFile
def extractNamesAndIntensities(feature_dir, sample_names, database):
"""
This function takes .featureXML files, the output of SmartPeak
pre-processing, and extracts the metabolite's reference and its measured
intensity. Furthermore, the peptide references are compared with a
database that has also been used for SmartPeak processing. Only
metabolites that appear in the database are being used. Their formula is
added to the output dataframe. The names of the files that should be
processed are added via a SmartPeak sequence file
Parameters
----------
feature_dir : path
the path to the directory holding the .featureXML files
sample_names : string
the extracted information from a sequence file
database : pandas.DataFrame
the organism specific database with the metabolite mapping
Returns
-------
extracted_data_all : pandas.DataFrame
a dataframe containing the extracted information,
the 4 columns include the sample name, the peptide reference for
the metabolites, their corresponding formulas and the measured
intensity
"""
metabolites_unique = database[0].unique()
extracted_data_dict = {}
cnt = 0
for name in sample_names:
features = FeatureMap()
FeatureXMLFile().load(
feature_dir + "/" + name + ".featureXML", features
)
for f in features:
try:
peptideRef = f.getMetaValue("PeptideRef").decode("utf-8")
except AttributeError:
peptideRef = f.getMetaValue("PeptideRef")
if peptideRef in metabolites_unique:
formula = database[database[0] == peptideRef][1]
extracted_data_dict[cnt] = {
"sample_group_name": name,
"Metabolite": peptideRef,
"Formula": list(formula)[0],
"Intensity": f.getMetaValue("peak_apex_int"),
}
cnt = cnt + 1
extracted_data_all = | pd.DataFrame.from_dict(extracted_data_dict, "index") | pandas.DataFrame.from_dict |
import warnings
import os
import numpy as np
import pandas as pd
from preprocessing.utils import remove_french_accents_and_cedillas_from_dataframe
columns_to_drop = ['nr', 'patient_id', 'eds_end_4digit', 'eds_manual', 'DOB', 'begin_date',
'end_date', 'death_date', 'death_hosp', 'eds_final_id',
'eds_final_begin', 'eds_final_end', 'eds_final_patient_id',
'eds_final_birth', 'eds_final_death', 'eds_final_birth_str',
'date_from', 'date_to']
identification_columns = ['patient_admission_id', 'sample_date']
# defining equivalent dosage labels
fibrinogen_equivalent_dosage_labels = ['fibrinogène', 'fibrinogène, antigène']
creatinine_equivalent_dosage_labels = ['créatinine', 'U-créatinine, colorimétrie', 'créatinine, colorimétrie']
hematocrit_equivalent_dosage_labels = ['hématocrite', 'G-Sgc-hématocrite, ABL', 'G-Sgv-hématocrite, ABL',
'Hématocrite, Smart 546', 'G-Sgv-hématocrite', 'hématocrite, pocH-100i',
'G-Sgvm-hématocrite, ABL', 'hématocrite, impédancemétrie',
'G-Sgv-hématocrite, ABL', 'G-Sga-hématocrite, ABL']
potassium_equivalent_dosage_labels = ['potassium', 'G-Sga-potassium, ABL', 'G-Sgv-potassium, ABL', 'Potassium, Piccolo',
'potassium, potentiométrie', 'G-Sgvm-potassium, ABL', 'G-Sgc-potassium, ABL',
'G-Sgv-potassium', 'U-potassium, potentiométrie indirecte']
sodium_equivalent_dosage_labels = ['sodium', 'G-Sga-sodium, ABL', 'G-Sgv-sodium, ABL', 'sodium, potentiométrie',
'Sodium, Piccolo', 'G-Sgvm-sodium, ABL', 'U-sodium, potentiométrie indirecte',
'G-Sgc-sodium, ABL', 'G-Sgv-sodium']
urea_equivalent_dosage_labels = ['urée', 'urée, colorimétrie', 'U-urée, colorimétrie']
hba1c_equivalent_dosage_labels = ['hémoglobine glyquée',
'hémoglobine glyquée (HbA1c), immunologique d\x92agglutination latex']
hemoglobin_equivalent_dosage_labels = ['hémoglobine', 'G-Sga-hémoglobine, ABL', 'G-Sgv-hémoglobine, ABL',
'hémoglobine, pocH-100i', 'hémoglobine, HemoCue 201', 'G-Sgvm-hémoglobine, ABL',
'G-Sgc-hémoglobine, ABL', 'G-Sgv-hémoglobine']
thrombocytes_equivalent_dosage_labels = ['thrombocytes', 'Thrombocytes, pocH-100i']
leucocytes_equivalent_dosage_labels = ['leucocytes', 'Leucocytes, pocH-100i']
erythrocytes_equivalent_dosage_labels = ['érythrocytes', 'érythrocytes, numération, impédancemétrie']
inr_equivalent_dosage_labels = ['INR', 'INR, turbodensitométrie']
crp_equivalent_dosage_labels = ['protéine C-réactive', 'Protéine C-Réactive (CRP), Piccolo',
'protéine C-réactive (CRP), immunoturbidimétrique latex CP',
'protéine C-réactive, Smart 546']
glucose_equivalent_dosage_labels = ['glucose', 'G-Sga-glucose, ABL', 'G-Sgv-glucose, ABL', 'Glucose',
'Glucose, Piccolo', 'glucose, PAP', 'G-Sgvm-glucose, ABL', 'G-Sgv-glucose',
'G-Sgc-glucose, ABL', 'U-glucose, PAP colorimétrie']
bilirubine_equivalent_dosage_labels = ['bilirubine totale', 'G-Sga-bilirubine totale, ABL',
'G-Sgv-bilirubine totale, ABL', 'Bilirubine totale, Piccolo',
'bilirubine totale, colorimétrie', 'G-Sgvm-bilirubine totale, ABL']
asat_equivalent_dosage_labels = ['ASAT', 'Aspartate aminotransférase (ASAT), Piccolo',
'aspartate aminotransférase (ASAT), colorimétrie']
alat_equivalent_dosage_labels = ['ALAT', 'Alanine aminotransférase (ALAT), Piccolo',
'alanine aminotransférase (ALAT), colorimétrie']
doac_xa_equivalent_dosage_labels = ['Activité anti-Xa (DOAC)', 'Activité anti-Xa (rivaroxaban)',
'Activité anti-Xa (apixaban)', 'Activité anti-Xa (edoxaban)',
'Activité anti-Xa (Apixaban)']
ldl_equivalent_dosage_labels = ['LDL cholestérol calculé', 'cholestérol non-HDL']
equivalence_lists = [fibrinogen_equivalent_dosage_labels, creatinine_equivalent_dosage_labels,
hematocrit_equivalent_dosage_labels,
potassium_equivalent_dosage_labels, sodium_equivalent_dosage_labels,
urea_equivalent_dosage_labels,
hba1c_equivalent_dosage_labels, hemoglobin_equivalent_dosage_labels,
thrombocytes_equivalent_dosage_labels,
leucocytes_equivalent_dosage_labels, erythrocytes_equivalent_dosage_labels,
inr_equivalent_dosage_labels,
crp_equivalent_dosage_labels, glucose_equivalent_dosage_labels,
bilirubine_equivalent_dosage_labels,
asat_equivalent_dosage_labels, alat_equivalent_dosage_labels, doac_xa_equivalent_dosage_labels,
ldl_equivalent_dosage_labels]
dosage_labels_to_exclude = ['érythrocytes agglutinés', 'Type d\'érythrocytes', 'Type des érythrocytes',
'érythrocytes en rouleaux',
'Cristaux cholestérol',
'potassium débit', 'urée débit', 'sodium débit', 'glucose débit',
'protéine C-réactive, POCT',
'activité anti-Xa (HBPM), autre posologie',
'activité anti-Xa (HBPM), thérapeutique, 1x /jour']
blood_material_equivalents = ['sga', 'sgv', 'sgvm', 'sgc']
material_to_exclude = ['LCR', 'liqu. pleural', 'épanchement', 'sg cordon', 'liqu. abdo.', 'liqu. ascite', 'liqu.']
non_numerical_values_to_remove = ['ERROR', 'nan', 'SANS RES.', 'Hémolysé', 'sans resultat',
'NON REALISE', 'NON INTERPRÉT.', 'COA', 'TAM']
def preprocess_labs(lab_df: pd.DataFrame, material_to_include: list = ['any_blood'],
verbose: bool = True) -> pd.DataFrame:
"""
Preprocess the labs dataframe
:param lab_df:
:param material_to_include: list of materials to include where material is one of the following: 'any_blood', 'urine'
:param verbose: print preprocessing safety details
:return:
"""
lab_df = lab_df.copy()
lab_df['patient_admission_id'] = lab_df['patient_id'].astype(str) + '_' + lab_df['begin_date'].apply(
lambda bd: ''.join(bd.split(' ')[0].split('.')))
lab_df.drop(columns_to_drop, axis=1, inplace=True)
lab_names = set([c.split('_')[0] for c in lab_df.columns if c not in identification_columns])
new_lab_column_headers = set(
['_'.join(c.split('_')[1:]) for c in lab_df.columns if c not in identification_columns])
print('Labs measured:', lab_names)
# split lab df into individual lab dfs for every lab name
lab_df_split_by_lab_name = []
for _, lab_name in enumerate(lab_names):
selected_columns = identification_columns + [c for c in lab_df.columns if c.split('_')[0] == lab_name]
individual_lab_df = lab_df[selected_columns].dropna(subset=[f'{lab_name}_value'])
individual_lab_df.columns = identification_columns + ['_'.join(c.split('_')[1:]) for c in
individual_lab_df.columns if c.startswith(lab_name)]
individual_lab_df['lab_name'] = lab_name
lab_df_split_by_lab_name.append(individual_lab_df)
reorganised_lab_df = pd.concat(lab_df_split_by_lab_name, ignore_index=True)
equalized_reorganised_lab_df = reorganised_lab_df.copy()
for equivalence_list in equivalence_lists:
equalized_reorganised_lab_df.loc[
reorganised_lab_df['dosage_label'].isin(equivalence_list[1:]), 'dosage_label'] = equivalence_list[0]
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
~equalized_reorganised_lab_df['dosage_label'].isin(dosage_labels_to_exclude)]
# check that units correspond
for dosage_label in equalized_reorganised_lab_df['dosage_label'].unique():
units_for_dosage_label = \
equalized_reorganised_lab_df[equalized_reorganised_lab_df['dosage_label'] == dosage_label][
'unit_of_measure'].unique()
print(dosage_label, units_for_dosage_label)
if len(units_for_dosage_label) > 1:
warnings.warn(f'{dosage_label} has different units: {units_for_dosage_label}')
raise ValueError(f'{dosage_label} has different units: {units_for_dosage_label}')
# fixing material equivalents and materials to exclude
# raise error if pO2, pCO2 or pH come from arterial and venous blood
for dosage_label in ['pO2', 'pCO2', 'pH']:
dosage_label_materials = \
equalized_reorganised_lab_df[equalized_reorganised_lab_df['dosage_label'].str.contains(dosage_label)][
'material_label'].unique()
if 'sga' in dosage_label_materials and len(dosage_label_materials) > 1:
raise ValueError(f'{dosage_label} has arterial and other materials: {dosage_label_materials}')
equalized_reorganised_lab_df.loc[
reorganised_lab_df['material_label'].isin(blood_material_equivalents), 'material_label'] = 'any_blood'
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
~equalized_reorganised_lab_df['material_label'].isin(material_to_exclude)]
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
equalized_reorganised_lab_df['material_label'].isin(material_to_include)]
# correct non numeric values
equalized_reorganised_lab_df = correct_non_numerical_values(equalized_reorganised_lab_df)
# remove non numerical values in value column
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
~equalized_reorganised_lab_df['value'].isin(non_numerical_values_to_remove)]
equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True)
remaining_non_numerical_values = \
equalized_reorganised_lab_df[ | pd.to_numeric(equalized_reorganised_lab_df['value'], errors='coerce') | pandas.to_numeric |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
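# Worked check of the expected values above: on 2015-01-04 generic CL1 holds
# 0.5 * CLF5 + 0.5 * CLG5, i.e. 0.5 * 0.05 + 0.5 * 0.1 = 0.075, and CL2 holds
# 0.5 * CLG5 + 0.5 * CLH5, i.e. 0.5 * 0.1 + 0.5 * 0.8 = 0.45.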
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
                                      (TS('2015-01-04')
"""Electric grid models module."""
import cvxpy as cp
import itertools
from multimethod import multimethod
import natsort
import numpy as np
import opendssdirect
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ElectricGridModel(mesmo.utils.ObjectBase):
"""Electric grid model object.
Note:
        This abstract class only defines the expected variables of electric grid model objects,
but does not implement any functionality.
Attributes:
timesteps (pd.Index): Index set of time steps of the current scenario. This is needed for optimization problem
definitions within linear electric grid models (see ``LinearElectricGridModel``).
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
"""
timesteps: pd.Index
phases: pd.Index
node_names: pd.Index
node_types: pd.Index
line_names: pd.Index
transformer_names: pd.Index
branch_names: pd.Index
branch_types: pd.Index
der_names: pd.Index
der_types: pd.Index
nodes: pd.Index
branches: pd.Index
lines: pd.Index
transformers: pd.Index
ders: pd.Index
node_voltage_vector_reference: np.ndarray
branch_power_vector_magnitude_reference: np.ndarray
der_power_vector_reference: np.ndarray
is_single_phase_equivalent: bool
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData
):
# Process overhead line type definitions.
# - This is implemented as direct modification on the electric grid data object and therefore done first.
electric_grid_data = self.process_line_types_overhead(electric_grid_data)
# Obtain index set for time steps.
# - This is needed for optimization problem definitions within linear electric grid models.
self.timesteps = electric_grid_data.scenario_data.timesteps
# Obtain index sets for phases / node names / node types / line names / transformer names /
# branch types / DER names.
self.phases = (
pd.Index(
np.unique(np.concatenate(
electric_grid_data.electric_grid_nodes.apply(
mesmo.utils.get_element_phases_array,
axis=1
).values
))
)
)
self.node_names = pd.Index(electric_grid_data.electric_grid_nodes['node_name'])
self.node_types = pd.Index(['source', 'no_source'])
self.line_names = pd.Index(electric_grid_data.electric_grid_lines['line_name'])
self.transformer_names = pd.Index(electric_grid_data.electric_grid_transformers['transformer_name'])
self.branch_types = pd.Index(['line', 'transformer'])
self.der_names = pd.Index(electric_grid_data.electric_grid_ders['der_name'])
self.der_types = pd.Index(electric_grid_data.electric_grid_ders['der_type'].unique())
# Obtain nodes index set, i.e., collection of all phases of all nodes
# for generating indexing functions for the admittance matrix.
# - The admittance matrix has one entry for each phase of each node in both dimensions.
# - There cannot be "empty" dimensions for missing phases of nodes, because the matrix would become singular.
# - Therefore the admittance matrix must have the exact number of existing phases of all nodes.
node_dimension = (
int(electric_grid_data.electric_grid_nodes.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
self.nodes = (
pd.DataFrame(
None,
index=range(node_dimension),
columns=[
'node_type',
'node_name',
'phase'
]
)
)
# Fill `node_name`.
self.nodes['node_name'] = (
pd.concat([
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1,
'node_name'
],
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1,
'node_name'
],
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1,
'node_name'
]
], ignore_index=True)
)
# Fill `phase`.
self.nodes['phase'] = (
np.concatenate([
np.repeat(1, sum(electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1))
])
)
# Fill `node_type`.
self.nodes['node_type'] = 'no_source'
# Set `node_type` for source node.
self.nodes.loc[
self.nodes['node_name'] == (electric_grid_data.electric_grid['source_node_name']),
'node_type'
] = 'source'
# Sort by `node_name`.
self.nodes = (
self.nodes.reindex(index=natsort.order_by_index(
self.nodes.index,
natsort.index_natsorted(self.nodes.loc[:, 'node_name'])
))
)
self.nodes = pd.MultiIndex.from_frame(self.nodes)
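        # Illustrative sketch of the resulting index (node names below are placeholders):
        # `self.nodes` holds one (node_type, node_name, phase) tuple per connected phase, e.g.
        #     MultiIndex([('source', '1', 1), ('source', '1', 2), ('source', '1', 3),
        #                 ('no_source', '2', 1), ('no_source', '2', 2), ('no_source', '2', 3)],
        #                names=['node_type', 'node_name', 'phase'])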
# Obtain branches index set, i.e., collection of phases of all branches
# for generating indexing functions for the branch admittance matrices.
# - Branches consider all power delivery elements, i.e., lines as well as transformers.
# - The second dimension of the branch admittance matrices is the number of phases of all nodes.
# - Transformers must have same number of phases per winding and exactly two windings.
line_dimension = (
int(electric_grid_data.electric_grid_lines.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
transformer_dimension = (
int(electric_grid_data.electric_grid_transformers.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
self.branches = (
pd.DataFrame(
None,
index=range(line_dimension + transformer_dimension),
columns=[
'branch_type',
'branch_name',
'phase'
]
)
)
# Fill `branch_name`.
self.branches['branch_name'] = (
pd.concat([
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1,
'transformer_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1,
'transformer_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1,
'transformer_name'
]
], ignore_index=True)
)
# Fill `phase`.
self.branches['phase'] = (
np.concatenate([
np.repeat(1, sum(electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1)),
np.repeat(1, sum(electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1))
])
)
# Fill `branch_type`.
self.branches['branch_type'] = (
np.concatenate([
np.repeat('line', line_dimension),
np.repeat('transformer', transformer_dimension)
])
)
# Sort by `branch_type` / `branch_name`.
self.branches = (
self.branches.reindex(index=natsort.order_by_index(
self.branches.index,
natsort.index_natsorted(self.branches.loc[:, 'branch_name'])
))
)
self.branches = (
self.branches.reindex(index=natsort.order_by_index(
self.branches.index,
natsort.index_natsorted(self.branches.loc[:, 'branch_type'])
))
)
self.branches = pd.MultiIndex.from_frame(self.branches)
# Obtain index sets for lines / transformers corresponding to branches.
self.lines = (
self.branches[
mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='line')
]
)
self.transformers = (
self.branches[
mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='transformer')
]
)
# Obtain index set for DERs.
self.ders = pd.MultiIndex.from_frame(electric_grid_data.electric_grid_ders[['der_type', 'der_name']])
# Obtain reference / no load voltage vector.
self.node_voltage_vector_reference = np.zeros(len(self.nodes), dtype=complex)
voltage_phase_factors = (
np.array([
np.exp(0 * 1j), # Phase 1.
np.exp(- 2 * np.pi / 3 * 1j), # Phase 2.
np.exp(2 * np.pi / 3 * 1j) # Phase 3.
])
)
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain phases index & node index for positioning the node voltage in the voltage vector.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
node_index = mesmo.utils.get_index(self.nodes, node_name=node_name)
# Insert voltage into voltage vector.
self.node_voltage_vector_reference[node_index] = (
voltage_phase_factors[phases_index]
* node.at['voltage'] / np.sqrt(3)
)
# Obtain reference / rated branch power vector.
self.branch_power_vector_magnitude_reference = np.zeros(len(self.branches), dtype=float)
for line_name, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain branch index.
branch_index = mesmo.utils.get_index(self.branches, branch_type='line', branch_name=line_name)
# Insert rated power into branch power vector.
self.branch_power_vector_magnitude_reference[branch_index] = (
line.at['maximum_current']
* electric_grid_data.electric_grid_nodes.at[line.at['node_1_name'], 'voltage']
/ np.sqrt(3)
)
for transformer_name, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Obtain branch index.
branch_index = mesmo.utils.get_index(self.branches, branch_type='transformer', branch_name=transformer_name)
# Insert rated power into branch flow vector.
self.branch_power_vector_magnitude_reference[branch_index] = (
transformer.at['apparent_power']
/ len(branch_index) # Divide total capacity by number of phases.
)
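        # Reasoning sketch for the reference magnitudes above: for lines, the per-phase rated
        # apparent power is approximated as maximum_current * (nominal voltage / sqrt(3)), i.e.
        # rated current times the line-to-neutral voltage; for transformers, the total apparent
        # power rating is divided evenly across the connected phases.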
# Obtain reference / nominal DER power vector.
self.der_power_vector_reference = (
(
electric_grid_data.electric_grid_ders.loc[:, 'active_power_nominal']
+ 1.0j * electric_grid_data.electric_grid_ders.loc[:, 'reactive_power_nominal']
).values
)
# Obtain flag for single-phase-equivalent modelling.
if electric_grid_data.electric_grid.at['is_single_phase_equivalent'] == 1:
if len(self.phases) != 1:
raise ValueError(f"Cannot model electric grid with {len(self.phases)} phase as single-phase-equivalent.")
self.is_single_phase_equivalent = True
else:
self.is_single_phase_equivalent = False
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.branch_power_vector_magnitude_reference[mesmo.utils.get_index(self.branches, branch_type='line')] *= 3
@staticmethod
def process_line_types_overhead(
electric_grid_data: mesmo.data_interface.ElectricGridData
) -> mesmo.data_interface.ElectricGridData:
"""Process overhead line type definitions in electric grid data object."""
# Process over-head line type definitions.
for line_type, line_type_data in electric_grid_data.electric_grid_line_types_overhead.iterrows():
# Obtain data shorthands.
# - Only for phases which have `conductor_id` defined in `electric_grid_line_types_overhead`.
phases = (
pd.Index([
1 if pd.notnull(line_type_data.at['phase_1_conductor_id']) else None,
2 if pd.notnull(line_type_data.at['phase_2_conductor_id']) else None,
3 if pd.notnull(line_type_data.at['phase_3_conductor_id']) else None,
'n' if pd.notnull(line_type_data.at['neutral_conductor_id']) else None
]).dropna()
)
phase_conductor_id = (
pd.Series({
1: line_type_data.at['phase_1_conductor_id'],
2: line_type_data.at['phase_2_conductor_id'],
3: line_type_data.at['phase_3_conductor_id'],
'n': line_type_data.at['neutral_conductor_id']
}).loc[phases]
)
phase_y = (
pd.Series({
1: line_type_data.at['phase_1_y'],
2: line_type_data.at['phase_2_y'],
3: line_type_data.at['phase_3_y'],
'n': line_type_data.at['neutral_y']
}).loc[phases]
)
phase_xy = (
pd.Series({
1: np.array([line_type_data.at['phase_1_x'], line_type_data.at['phase_1_y']]),
2: np.array([line_type_data.at['phase_2_x'], line_type_data.at['phase_2_y']]),
3: np.array([line_type_data.at['phase_3_x'], line_type_data.at['phase_3_y']]),
'n': np.array([line_type_data.at['neutral_x'], line_type_data.at['neutral_y']])
}).loc[phases]
)
phase_conductor_diameter = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_diameter'
]
for phase in phases
], index=phases)
* 1e-3 # mm to m.
)
phase_conductor_geometric_mean_radius = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_geometric_mean_radius'
]
for phase in phases
], index=phases)
* 1e-3 # mm to m.
)
phase_conductor_resistance = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_resistance'
]
for phase in phases
], index=phases)
)
phase_conductor_maximum_current = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_maximum_current'
]
for phase in phases
], index=phases)
)
# Obtain shorthands for neutral / non-neutral phases.
# - This is needed for Kron reduction.
phases_neutral = phases[phases.isin(['n'])]
phases_non_neutral = phases[~phases.isin(['n'])]
# Other parameter shorthands.
frequency = electric_grid_data.electric_grid.at['base_frequency'] # In Hz.
earth_resistivity = line_type_data.at['earth_resistivity'] # In Ωm.
air_permittivity = line_type_data.at['air_permittivity'] # In nF/km.
g_factor = 1e-4 # In Ω/km from 0.1609347e-3 Ω/mile from Kersting <https://doi.org/10.1201/9781315120782>.
# Obtain impedance matrix in Ω/km based on Kersting <https://doi.org/10.1201/9781315120782>.
z_matrix = pd.DataFrame(index=phases, columns=phases, dtype=complex)
for phase_row, phase_col in itertools.product(phases, phases):
# Calculate geometric parameters.
d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
s_angle = np.pi / 2 - np.arcsin((phase_y.at[phase_row] + phase_y.at[phase_col]) / s_distance)
# Calculate Kersting / Carson parameters.
k_factor = (
8.565e-4 * s_distance * np.sqrt(frequency / earth_resistivity)
)
p_factor = (
np.pi / 8
- (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(s_angle)
- k_factor ** 2 / 16 * np.cos(2 * s_angle) * (0.6728 + np.log(2 / k_factor))
)
q_factor = (
-0.0386
+ 0.5 * np.log(2 / k_factor)
+ (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(2 * s_angle)
)
x_factor = (
2 * np.pi * frequency * g_factor
* np.log(
phase_conductor_diameter[phase_row]
/ phase_conductor_geometric_mean_radius.at[phase_row]
)
)
                # Calculate impedance according to Kersting / Carson <https://doi.org/10.1201/9781315120782>.
if phase_row == phase_col:
z_matrix.at[phase_row, phase_col] = (
phase_conductor_resistance.at[phase_row]
+ 4 * np.pi * frequency * p_factor * g_factor
+ 1j * (
x_factor
+ 2 * np.pi * frequency * g_factor
* np.log(s_distance / phase_conductor_diameter[phase_row])
+ 4 * np.pi * frequency * q_factor * g_factor
)
)
else:
z_matrix.at[phase_row, phase_col] = (
4 * np.pi * frequency * p_factor * g_factor
+ 1j * (
2 * np.pi * frequency * g_factor
* np.log(s_distance / d_distance)
+ 4 * np.pi * frequency * q_factor * g_factor
)
)
# Apply Kron reduction.
z_matrix = (
pd.DataFrame(
(
z_matrix.loc[phases_non_neutral, phases_non_neutral].values
- z_matrix.loc[phases_non_neutral, phases_neutral].values
@ z_matrix.loc[phases_neutral, phases_neutral].values ** -1 # Inverse of scalar value.
@ z_matrix.loc[phases_neutral, phases_non_neutral].values
),
index=phases_non_neutral,
columns=phases_non_neutral
)
)
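            # Reasoning sketch: with the impedance matrix partitioned into phase (p) and
            # neutral (n) blocks, the Kron reduction applied above eliminates the neutral
            # conductor via
            #     Z_reduced = Z_pp - Z_pn @ Z_nn^-1 @ Z_np
            # Since at most one neutral conductor is defined per line type, Z_nn is a scalar
            # and its inverse reduces to a simple reciprocal.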
# Obtain potentials matrix in km/nF based on Kersting <https://doi.org/10.1201/9781315120782>.
p_matrix = pd.DataFrame(index=phases, columns=phases, dtype=float)
for phase_row, phase_col in itertools.product(phases, phases):
# Calculate geometric parameters.
d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
# Calculate potential according to Kersting <https://doi.org/10.1201/9781315120782>.
if phase_row == phase_col:
p_matrix.at[phase_row, phase_col] = (
1 / (2 * np.pi * air_permittivity)
* np.log(s_distance / phase_conductor_diameter.at[phase_row])
)
else:
p_matrix.at[phase_row, phase_col] = (
1 / (2 * np.pi * air_permittivity)
* np.log(s_distance / d_distance)
)
# Apply Kron reduction.
p_matrix = (
pd.DataFrame(
(
p_matrix.loc[phases_non_neutral, phases_non_neutral].values
- p_matrix.loc[phases_non_neutral, phases_neutral].values
@ p_matrix.loc[phases_neutral, phases_neutral].values ** -1 # Inverse of scalar value.
@ p_matrix.loc[phases_neutral, phases_non_neutral].values
),
index=phases_non_neutral,
columns=phases_non_neutral
)
)
# Obtain capacitance matrix in nF/km.
c_matrix = pd.DataFrame(np.linalg.inv(p_matrix), index=phases_non_neutral, columns=phases_non_neutral)
# Obtain final element matrices.
resistance_matrix = z_matrix.apply(np.real) # In Ω/km.
reactance_matrix = z_matrix.apply(np.imag) # In Ω/km.
capacitance_matrix = c_matrix # In nF/km.
# Add to line type matrices definition.
for phase_row in phases_non_neutral:
for phase_col in phases_non_neutral[phases_non_neutral <= phase_row]:
electric_grid_data.electric_grid_line_types_matrices = (
electric_grid_data.electric_grid_line_types_matrices.append(
pd.Series({
'line_type': line_type,
'row': phase_row,
'col': phase_col,
'resistance': resistance_matrix.at[phase_row, phase_col],
'reactance': reactance_matrix.at[phase_row, phase_col],
'capacitance': capacitance_matrix.at[phase_row, phase_col]
}),
ignore_index=True
)
)
# Obtain number of phases.
electric_grid_data.electric_grid_line_types.loc[line_type, 'n_phases'] = len(phases_non_neutral)
# Obtain maximum current.
# TODO: Validate this.
electric_grid_data.electric_grid_line_types.loc[line_type, 'maximum_current'] = (
phase_conductor_maximum_current.loc[phases_non_neutral].mean()
)
return electric_grid_data
class ElectricGridModelDefault(ElectricGridModel):
"""Electric grid model object consisting of the index sets for node names / branch names / der names / phases /
node types / branch types, the nodal admittance / transformation matrices, branch admittance /
incidence matrices and DER incidence matrices.
:syntax:
- ``ElectricGridModelDefault(electric_grid_data)``: Instantiate electric grid model for given
`electric_grid_data`.
- ``ElectricGridModelDefault(scenario_name)``: Instantiate electric grid model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
Arguments:
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
scenario_name (str): MESMO scenario name.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
node_admittance_matrix (sp.spmatrix): Nodal admittance matrix.
node_transformation_matrix (sp.spmatrix): Nodal transformation matrix.
branch_admittance_1_matrix (sp.spmatrix): Branch admittance matrix in the 'from' direction.
branch_admittance_2_matrix (sp.spmatrix): Branch admittance matrix in the 'to' direction.
branch_incidence_1_matrix (sp.spmatrix): Branch incidence matrix in the 'from' direction.
branch_incidence_2_matrix (sp.spmatrix): Branch incidence matrix in the 'to' direction.
der_incidence_wye_matrix (sp.spmatrix): Load incidence matrix for 'wye' DERs.
der_incidence_delta_matrix (sp.spmatrix): Load incidence matrix for 'delta' DERs.
node_admittance_matrix_no_source (sp.spmatrix): Nodal admittance matrix from no-source to no-source nodes.
        node_admittance_matrix_source_to_no_source (sp.spmatrix): Nodal admittance matrix from source to no-source nodes.
        node_transformation_matrix_no_source (sp.spmatrix): Nodal transformation matrix for no-source nodes.
der_incidence_wye_matrix_no_source (sp.spmatrix): Incidence matrix from wye-conn. DERs to no-source nodes.
der_incidence_delta_matrix_no_source (sp.spmatrix): Incidence matrix from delta-conn. DERs to no-source nodes.
node_voltage_vector_reference_no_source (sp.spmatrix): Nodal reference voltage vector for no-source nodes.
node_voltage_vector_reference_source (sp.spmatrix): Nodal reference voltage vector for source nodes.
node_admittance_matrix_no_source_inverse (sp.spmatrix): Inverse of no-source nodal admittance matrix.
"""
node_admittance_matrix: sp.spmatrix
node_transformation_matrix: sp.spmatrix
branch_admittance_1_matrix: sp.spmatrix
branch_admittance_2_matrix: sp.spmatrix
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
der_incidence_wye_matrix: sp.spmatrix
der_incidence_delta_matrix: sp.spmatrix
node_admittance_matrix_no_source: sp.spmatrix
node_admittance_matrix_source_to_no_source: sp.spmatrix
node_transformation_matrix_no_source: sp.spmatrix
der_incidence_wye_matrix_no_source: sp.spmatrix
der_incidence_delta_matrix_no_source: sp.spmatrix
node_voltage_vector_reference_no_source: sp.spmatrix
node_voltage_vector_reference_source: sp.spmatrix
node_admittance_matrix_no_source_inverse: sp.spmatrix
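    # Illustrative usage sketch (the scenario name is a placeholder assumption and must refer
    # to a scenario defined in the database):
    #     electric_grid_model = ElectricGridModelDefault('example_scenario')
    #     print(electric_grid_model.node_admittance_matrix.shape)
    #     print(electric_grid_model.nodes)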
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = mesmo.data_interface.ElectricGridData(scenario_name)
# Instantiate electric grid model object.
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData,
):
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Define sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrix entries.
self.node_admittance_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=complex)
)
self.node_transformation_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=int)
)
self.branch_admittance_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_admittance_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_incidence_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.branch_incidence_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.der_incidence_wye_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
self.der_incidence_delta_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
# Add lines to admittance, transformation and incidence matrices.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(line)
# Obtain line resistance / reactance / capacitance matrix entries for the line.
matrices_index = (
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type'] == line['line_type']
)
resistance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'resistance'].values
)
reactance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'reactance'].values
)
capacitance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'capacitance'].values
)
# Obtain the full line resistance and reactance matrices.
            # Data only contains the lower-triangular entries (the matrices are symmetric).
matrices_full_index = (
np.array([
[1, 2, 4],
[2, 3, 5],
[4, 5, 6]
]) - 1
)
matrices_full_index = (
matrices_full_index[:len(phases_vector), :len(phases_vector)]
)
resistance_matrix = resistance_matrix[matrices_full_index]
reactance_matrix = reactance_matrix[matrices_full_index]
capacitance_matrix = capacitance_matrix[matrices_full_index]
# Construct line series admittance matrix.
series_admittance_matrix = (
np.linalg.inv(
(resistance_matrix + 1j * reactance_matrix)
* line['length']
)
)
# Construct line shunt admittance.
            # Note: nF/km is converted to F/km via the 1e-9 factor; the shunt susceptance is
            # B = 2π * f * C, and the 0.5 factor assigns half of it to each line end.
# TODO: Check line shunt admittance.
shunt_admittance_matrix = (
capacitance_matrix
* 2 * np.pi * electric_grid_data.electric_grid.at['base_frequency'] * 1e-9
* 0.5j
* line['length']
)
# Construct line element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
admittance_matrix_11 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
admittance_matrix_12 = (
- series_admittance_matrix
)
admittance_matrix_21 = (
- series_admittance_matrix
)
admittance_matrix_22 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
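            # Reasoning sketch: the four blocks above form the standard two-port (pi-model)
            # nodal admittance relation of the line,
            #     [i_1]   [ Y_series + Y_shunt/2, -Y_series             ] [v_1]
            #     [i_2] = [ -Y_series,             Y_series + Y_shunt/2 ] [v_2]
            # where `shunt_admittance_matrix` above already includes the 0.5 factor.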
# Obtain indexes for positioning the line element matrices
# in the full admittance matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='line',
branch_name=line['line_name']
)
)
# Add line element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Add transformers to admittance, transformation and incidence matrices.
# - Note: This setup only works for transformers with exactly two windings
# and identical number of phases at each winding / side.
# Define transformer factor matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
transformer_factors_1 = (
np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
)
transformer_factors_2 = (
1 / 3
* np.array([
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]
])
)
transformer_factors_3 = (
1 / np.sqrt(3)
* np.array([
[-1, 1, 0],
[0, -1, 1],
[1, 0, -1]
])
)
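        # As used in the connection-specific cases below, factor matrix 1 is used for
        # wye-connected winding terms (self and wye-wye coupling), factor matrix 2 for
        # delta-connected winding terms, and factor matrix 3 for the coupling terms between
        # a wye and a delta winding, following the reference cited above.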
# Add transformers to admittance matrix.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Raise error if transformer nominal power is not valid.
if not (transformer.at['apparent_power'] > 0):
raise ValueError(
f"At transformer '{transformer.at['transformer_name']}', "
f"found invalid value for `apparent_power`: {transformer.at['apparent_power']}`"
)
# Calculate transformer admittance.
admittance = (
(
(
2 * transformer.at['resistance_percentage'] / 100
+ 1j * transformer.at['reactance_percentage'] / 100
)
* (
electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage'] ** 2
/ transformer.at['apparent_power']
)
) ** -1
)
# Calculate turn ratio.
turn_ratio = (
(
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_1_name'], 'voltage']
)
/ (
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage']
)
)
# Construct transformer element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
if transformer.at['connection'] == "wye-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "delta-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "wye-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
elif transformer.at['connection'] == "delta-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
else:
raise ValueError(f"Unknown transformer type: {transformer.at['connection']}")
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(transformer)
# Obtain element admittance matrices for correct phases.
admittance_matrix_11 = (
admittance_matrix_11[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_12 = (
admittance_matrix_12[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_21 = (
admittance_matrix_21[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_22 = (
admittance_matrix_22[np.ix_(phases_vector - 1, phases_vector - 1)]
)
# Obtain indexes for positioning the transformer element
# matrices in the full matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='transformer',
branch_name=transformer['transformer_name']
)
)
# Add transformer element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Define transformation matrix according to:
# https://doi.org/10.1109/TPWRS.2018.2823277
transformation_entries = (
np.array([
[1, -1, 0],
[0, 1, -1],
[-1, 0, 1]
])
)
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain node phases index.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
# Construct node transformation matrix.
transformation_matrix = transformation_entries[np.ix_(phases_index, phases_index)]
# Obtain index for positioning node transformation matrix in full transformation matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=node['node_name']
)
)
# Add node transformation matrix to full transformation matrix.
self.node_transformation_matrix[np.ix_(node_index, node_index)] = transformation_matrix
# Add DERs to der incidence matrix.
for der_name, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain der connection type.
connection = der['connection']
# Obtain indexes for positioning the DER in the incidence matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=der['node_name'],
phase=mesmo.utils.get_element_phases_array(der)
)
)
der_index = (
mesmo.utils.get_index(
self.ders,
der_name=der['der_name']
)
)
if connection == "wye":
# Define incidence matrix entries.
# - Wye ders are represented as balanced ders across all
# their connected phases.
incidence_matrix = (
np.ones((len(node_index), 1), dtype=float)
/ len(node_index)
)
self.der_incidence_wye_matrix[np.ix_(node_index, der_index)] = incidence_matrix
elif connection == "delta":
# Obtain phases of the delta der.
phases_list = mesmo.utils.get_element_phases_array(der).tolist()
# Select connection node based on phase arrangement of delta der.
# TODO: Why no multi-phase delta DERs?
# - Delta DERs must be single-phase.
if phases_list in ([1, 2], [2, 3]):
node_index = [node_index[0]]
elif phases_list == [1, 3]:
node_index = [node_index[1]]
else:
raise ValueError(f"Unknown delta phase arrangement: {phases_list}")
# Define incidence matrix entry.
# - Delta ders are assumed to be single-phase.
incidence_matrix = np.array([1])
self.der_incidence_delta_matrix[np.ix_(node_index, der_index)] = incidence_matrix
else:
raise ValueError(f"Unknown der connection type: {connection}")
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.der_incidence_wye_matrix /= 3
# Note that there won't be any delta loads in the single-phase-equivalent grid.
# Convert sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrices.
# - Converting from DOK to CSR format for more efficient calculations
# according to <https://docs.scipy.org/doc/scipy/reference/sparse.html>.
self.node_admittance_matrix = self.node_admittance_matrix.tocsr()
self.node_transformation_matrix = self.node_transformation_matrix.tocsr()
self.branch_admittance_1_matrix = self.branch_admittance_1_matrix.tocsr()
self.branch_admittance_2_matrix = self.branch_admittance_2_matrix.tocsr()
self.branch_incidence_1_matrix = self.branch_incidence_1_matrix.tocsr()
self.branch_incidence_2_matrix = self.branch_incidence_2_matrix.tocsr()
self.der_incidence_wye_matrix = self.der_incidence_wye_matrix.tocsr()
self.der_incidence_delta_matrix = self.der_incidence_delta_matrix.tocsr()
# Define shorthands for no-source variables.
# TODO: Add in class documentation.
# TODO: Replace local variables in power flow / linear models.
self.node_admittance_matrix_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.node_admittance_matrix_source_to_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='source')
)]
)
self.node_transformation_matrix_no_source = (
self.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.der_incidence_wye_matrix_no_source = (
self.der_incidence_wye_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.der_incidence_delta_matrix_no_source = (
self.der_incidence_delta_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.node_voltage_vector_reference_no_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='no_source')
]
)
self.node_voltage_vector_reference_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='source')
]
)
# Calculate inverse of no-source node admittance matrix.
# - Raise error if not invertible.
# - Only checking invertibility of no-source node admittance matrix, because full node admittance matrix may
# be non-invertible, e.g. zero entries when connecting a multi-phase line at three-phase source node.
try:
self.node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(self.node_admittance_matrix_no_source.tocsc())
)
assert not np.isnan(self.node_admittance_matrix_no_source_inverse.data).any()
except (RuntimeError, AssertionError) as exception:
raise (
ValueError(f"Node admittance matrix could not be inverted. Please check electric grid definition.")
) from exception
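        # Usage note (sketch): precomputing this inverse supports voltage solutions of the form
        #     v_no_source = Y_no_source^-1 @ (i_no_source - Y_source_to_no_source @ v_source)
        # which is the building block of fixed-point power flow solution methods such as
        # `PowerFlowSolutionFixedPoint` below.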
class ElectricGridModelOpenDSS(ElectricGridModel):
"""OpenDSS electric grid model object.
    - Instantiate OpenDSS circuit by generating and running OpenDSS commands corresponding to given `electric_grid_data`,
utilizing the `OpenDSSDirect.py` package.
- The OpenDSS circuit can be accessed with the API of
`OpenDSSDirect.py`: http://dss-extensions.org/OpenDSSDirect.py/opendssdirect.html
- Due to dependency on `OpenDSSDirect.py`, creating multiple objects of this type may result in erroneous behavior.
:syntax:
- ``ElectricGridModelOpenDSS(electric_grid_data)``: Initialize OpenDSS circuit model for given
`electric_grid_data`.
        - ``ElectricGridModelOpenDSS(scenario_name)``: Initialize OpenDSS circuit model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
Parameters:
scenario_name (str): MESMO scenario name.
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
circuit_name (str): Circuit name, stored for validation that the correct OpenDSS model is being accessed.
electric_grid_data: (mesmo.data_interface.ElectricGridData): Electric grid data object, stored for
possible reinitialization of the OpenDSS model.
"""
circuit_name: str
electric_grid_data: mesmo.data_interface.ElectricGridData
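    # Illustrative usage sketch (placeholder scenario name; requires a working OpenDSS setup
    # via `OpenDSSDirect.py`):
    #     electric_grid_model_opendss = ElectricGridModelOpenDSS('example_scenario')
    #     opendssdirect.run_command("solve")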
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = (
mesmo.data_interface.ElectricGridData(scenario_name)
)
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData
):
# TODO: Add reset method to ensure correct circuit model is set in OpenDSS when handling multiple models.
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Obtain circuit name.
self.circuit_name = electric_grid_data.electric_grid.at['electric_grid_name']
# Store electric grid data.
self.electric_grid_data = electric_grid_data
# Clear OpenDSS.
opendss_command_string = "clear"
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Obtain source voltage.
source_voltage = (
electric_grid_data.electric_grid_nodes.at[
electric_grid_data.electric_grid.at['source_node_name'],
'voltage'
]
)
# Adjust source voltage for single-phase, non-single-phase-equivalent modelling.
if (len(self.phases) == 1) and not self.is_single_phase_equivalent:
source_voltage /= np.sqrt(3)
# Add circuit info to OpenDSS command string.
opendss_command_string = (
f"set defaultbasefrequency={electric_grid_data.electric_grid.at['base_frequency']}"
+ f"\nnew circuit.{self.circuit_name}"
+ f" phases={len(self.phases)}"
+ f" bus1={electric_grid_data.electric_grid.at['source_node_name']}"
+ f" basekv={source_voltage / 1000}"
+ f" mvasc3=9999999999 9999999999" # Set near-infinite power limit for source node.
)
# Create circuit in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define line codes.
for line_type_index, line_type in electric_grid_data.electric_grid_line_types.iterrows():
# Obtain line resistance and reactance matrix entries for the line.
matrices = (
electric_grid_data.electric_grid_line_types_matrices.loc[
(
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type']
== line_type.at['line_type']
),
['resistance', 'reactance', 'capacitance']
]
)
# Obtain number of phases.
            # - Only define line types for as many phases as needed for the current grid.
n_phases = min(line_type.at['n_phases'], len(self.phases))
# Add line type name and number of phases to OpenDSS command string.
opendss_command_string = (
f"new linecode.{line_type.at['line_type']}"
+ f" nphases={n_phases}"
)
# Add resistance and reactance matrix entries to OpenDSS command string,
# with formatting depending on number of phases.
if n_phases == 1:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'resistance'])
+ " xmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'reactance'])
+ " cmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
elif n_phases == 2:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
+ " xmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
+ " cmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
elif n_phases == 3:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
+ f" xmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
+ f" cmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
# Create line code in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
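            # Illustrative example of a resulting command string for a 3-phase line type
            # (name and matrix values are placeholders; the actual string is emitted on a
            # single line):
            #     new linecode.example_type nphases=3 rmatrix = [0.10000000 | 0.04000000 0.10000000 | 0.04000000 0.04000000 0.10000000] xmatrix = [...] cmatrix = [...]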
# Define lines.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain number of phases for the line.
n_phases = len(mesmo.utils.get_element_phases_array(line))
# Add line name, phases, node connections, line type and length
# to OpenDSS command string.
opendss_command_string = (
f"new line.{line['line_name']}"
+ f" phases={n_phases}"
+ f" bus1={line['node_1_name']}{mesmo.utils.get_element_phases_string(line)}"
+ f" bus2={line['node_2_name']}{mesmo.utils.get_element_phases_string(line)}"
+ f" linecode={line['line_type']}"
+ f" length={line['length']}"
)
# Create line in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define transformers.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Obtain number of phases.
n_phases = len(mesmo.utils.get_element_phases_array(transformer))
# Add transformer name, number of phases / windings and reactances to OpenDSS command string.
opendss_command_string = (
f"new transformer.{transformer.at['transformer_name']}"
+ f" phases={n_phases}"
+ f" windings=2"
+ f" xscarray=[{transformer.at['reactance_percentage']}]"
)
# Add windings to OpenDSS command string.
windings = [1, 2]
for winding in windings:
# Obtain nominal voltage level for each winding.
voltage = electric_grid_data.electric_grid_nodes.at[transformer.at[f'node_{winding}_name'], 'voltage']
# Obtain node phases connection string for each winding.
connection = transformer.at['connection'].split('-')[winding - 1]
if connection == "wye":
node_phases_string = (
mesmo.utils.get_element_phases_string(transformer)
+ ".0" # Enforce wye-grounded connection.
)
elif connection == "delta":
node_phases_string = (
mesmo.utils.get_element_phases_string(transformer)
)
else:
raise ValueError(f"Unknown transformer connection type: {connection}")
# Add node connection, nominal voltage / power, resistance and maximum / minimum tap level
# to OpenDSS command string for each winding.
opendss_command_string += (
f" wdg={winding}"
+ f" bus={transformer.at[f'node_{winding}_name']}" + node_phases_string
+ f" conn={connection}"
+ f" kv={voltage / 1000}"
+ f" kva={transformer.at['apparent_power'] / 1000}"
+ f" %r={transformer.at['resistance_percentage']}"
+ f" maxtap="
+ f"{transformer.at['tap_maximum_voltage_per_unit']}"
+ f" mintap="
+ f"{transformer.at['tap_minimum_voltage_per_unit']}"
)
# Create transformer in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define DERs.
# TODO: At the moment, all DERs are modelled as loads in OpenDSS.
for der_index, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain number of phases for the DER.
n_phases = len(mesmo.utils.get_element_phases_array(der))
# Obtain nominal voltage level for the DER.
voltage = electric_grid_data.electric_grid_nodes.at[der['node_name'], 'voltage']
# Convert to line-to-neutral voltage for single-phase DERs, according to:
# https://sourceforge.net/p/electricdss/discussion/861976/thread/9c9e0efb/
# - Not needed for single-phase-equivalent modelling.
if (n_phases == 1) and not self.is_single_phase_equivalent:
voltage /= np.sqrt(3)
# Add explicit ground-phase connection for single-phase, wye DERs, according to:
# https://sourceforge.net/p/electricdss/discussion/861976/thread/d420e8fb/
# - This does not seem to make a difference if omitted, but is kept here to follow the recommendation.
# - Not needed for single-phase-equivalent modelling.
if (n_phases == 1) and (der['connection'] == 'wye') and not self.is_single_phase_equivalent:
ground_phase_string = ".0"
else:
ground_phase_string = ""
# Add node connection, model type, voltage, nominal power to OpenDSS command string.
opendss_command_string = (
f"new load.{der['der_name']}"
+ f" bus1={der['node_name']}{ground_phase_string}{mesmo.utils.get_element_phases_string(der)}"
+ f" phases={n_phases}"
+ f" conn={der['connection']}"
# All loads are modelled as constant P/Q according to:
# OpenDSS Manual April 2018, page 150, "Model"
+ f" model=1"
+ f" kv={voltage / 1000}"
+ f" kw={- der['active_power_nominal'] / 1000}"
+ f" kvar={- der['reactive_power_nominal'] / 1000}"
# Set low V_min to avoid switching to impedance model according to:
# OpenDSS Manual April 2018, page 150, "Vminpu"
+ f" vminpu=0.6"
# Set high V_max to avoid switching to impedance model according to:
# OpenDSS Manual April 2018, page 150, "Vmaxpu"
+ f" vmaxpu=1.4"
)
# Create DER in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Obtain voltage bases.
voltage_bases = (
np.unique(
electric_grid_data.electric_grid_nodes.loc[:, 'voltage'].values / 1000
).tolist()
)
# Set control mode and voltage bases.
opendss_command_string = (
f"set voltagebases={voltage_bases}"
+ f"\nset controlmode=off"
+ f"\ncalcvoltagebases"
)
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Set solution mode to "single snapshot power flow" according to:
# OpenDSSComDoc, November 2016, page 1
opendss_command_string = "set mode=0"
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
class ElectricGridDEROperationResults(mesmo.utils.ResultsBase):
der_active_power_vector: pd.DataFrame
der_active_power_vector_per_unit: pd.DataFrame
der_reactive_power_vector: pd.DataFrame
der_reactive_power_vector_per_unit: pd.DataFrame
class ElectricGridOperationResults(ElectricGridDEROperationResults):
electric_grid_model: ElectricGridModel
node_voltage_magnitude_vector: pd.DataFrame
node_voltage_magnitude_vector_per_unit: pd.DataFrame
node_voltage_angle_vector: pd.DataFrame
branch_power_magnitude_vector_1: pd.DataFrame
branch_power_magnitude_vector_1_per_unit: pd.DataFrame
branch_active_power_vector_1: pd.DataFrame
branch_active_power_vector_1_per_unit: pd.DataFrame
branch_reactive_power_vector_1: pd.DataFrame
branch_reactive_power_vector_1_per_unit: pd.DataFrame
branch_power_magnitude_vector_2: pd.DataFrame
branch_power_magnitude_vector_2_per_unit: pd.DataFrame
branch_active_power_vector_2: pd.DataFrame
branch_active_power_vector_2_per_unit: pd.DataFrame
branch_reactive_power_vector_2: pd.DataFrame
branch_reactive_power_vector_2_per_unit: pd.DataFrame
loss_active: pd.DataFrame
loss_reactive: pd.DataFrame
class ElectricGridDLMPResults(mesmo.utils.ResultsBase):
electric_grid_energy_dlmp_node_active_power: pd.DataFrame
electric_grid_voltage_dlmp_node_active_power: pd.DataFrame
electric_grid_congestion_dlmp_node_active_power: pd.DataFrame
electric_grid_loss_dlmp_node_active_power: pd.DataFrame
electric_grid_total_dlmp_node_active_power: pd.DataFrame
electric_grid_voltage_dlmp_node_reactive_power: pd.DataFrame
electric_grid_congestion_dlmp_node_reactive_power: pd.DataFrame
electric_grid_loss_dlmp_node_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_node_reactive_power: pd.DataFrame
electric_grid_total_dlmp_node_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_der_active_power: pd.DataFrame
electric_grid_voltage_dlmp_der_active_power: pd.DataFrame
electric_grid_congestion_dlmp_der_active_power: pd.DataFrame
electric_grid_loss_dlmp_der_active_power: pd.DataFrame
electric_grid_total_dlmp_der_active_power: pd.DataFrame
electric_grid_voltage_dlmp_der_reactive_power: pd.DataFrame
electric_grid_congestion_dlmp_der_reactive_power: pd.DataFrame
electric_grid_loss_dlmp_der_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_der_reactive_power: pd.DataFrame
electric_grid_total_dlmp_der_reactive_power: pd.DataFrame
electric_grid_total_dlmp_price_timeseries: pd.DataFrame
class PowerFlowSolution(mesmo.utils.ObjectBase):
"""Power flow solution object consisting of DER power vector and the corresponding solution for
nodal voltage vector / branch power vector and total loss (all complex valued).
"""
der_power_vector: np.ndarray
node_voltage_vector: np.ndarray
branch_power_vector_1: np.ndarray
branch_power_vector_2: np.ndarray
loss: complex
class PowerFlowSolutionFixedPoint(PowerFlowSolution):
"""Fixed point power flow solution object."""
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain `electric_grid_model`.
electric_grid_model = ElectricGridModelDefault(scenario_name)
self.__init__(
electric_grid_model,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
**kwargs
):
# Obtain `der_power_vector`, assuming nominal power conditions.
der_power_vector = electric_grid_model.der_power_vector_reference
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
**kwargs
):
# Store DER power vector.
self.der_power_vector = der_power_vector.ravel()
# Obtain voltage solution.
self.node_voltage_vector = (
self.get_voltage(
electric_grid_model,
self.der_power_vector,
**kwargs
)
)
# Obtain branch flow solution.
(
self.branch_power_vector_1,
self.branch_power_vector_2
) = (
self.get_branch_power(
electric_grid_model,
self.node_voltage_vector
)
)
# Obtain loss solution.
self.loss = (
self.get_loss(
electric_grid_model,
self.node_voltage_vector
)
)
@staticmethod
def check_solution_conditions(
electric_grid_model: ElectricGridModelDefault,
node_power_vector_wye_initial_no_source: np.ndarray,
node_power_vector_delta_initial_no_source: np.ndarray,
node_power_vector_wye_candidate_no_source: np.ndarray,
node_power_vector_delta_candidate_no_source: np.ndarray,
node_voltage_vector_initial_no_source: np.ndarray
) -> bool:
"""Check conditions for fixed-point solution existence, uniqueness and non-singularity for
given power vector candidate and initial point.
- Conditions are formulated according to: <https://arxiv.org/pdf/1702.03310.pdf>
- Note the performance issues of this condition check algorithm due to the
requirement for matrix inversions / solving of linear equations.
"""
# Calculate norm of the initial nodal power vector.
xi_initial = (
            np.max(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* node_power_vector_wye_initial_no_source
)
)
                )
            )
            + np.max(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
electric_grid_model.node_transformation_matrix_no_source
* (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
@ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
) ** -1
)
* node_power_vector_delta_initial_no_source
)
)
                )
            )
)
# Calculate norm of the candidate nodal power vector.
xi_candidate = (
            np.max(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
)
                )
            )
            + np.max(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
electric_grid_model.node_transformation_matrix_no_source
* (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
@ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
) ** -1
) * (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
)
                )
            )
)
# Calculate norm of the initial nodal voltage vector.
gamma = (
np.min([
np.min(
np.abs(node_voltage_vector_initial_no_source)
/ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
),
np.min(
np.abs(
electric_grid_model.node_transformation_matrix_no_source
* node_voltage_vector_initial_no_source
)
/ (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
* np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
)
)
])
)
# Obtain conditions for solution existence, uniqueness and non-singularity.
condition_initial = (
xi_initial
<
(gamma ** 2)
)
condition_candidate = (
xi_candidate
<
(0.25 * (((gamma ** 2) - xi_initial) / gamma) ** 2)
)
is_valid = (
condition_initial
& condition_candidate
)
# If `condition_initial` is violated, the given initial nodal voltage vector and power vectors are not valid.
# This suggests an error in the problem setup and hence triggers a warning.
if ~condition_initial:
logger.warning("Fixed point solution condition is not satisfied for the provided initial point.")
return is_valid
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
outer_iteration_limit=100,
outer_solution_algorithm='check_solution', # Choices: `check_conditions`, `check_solution`.
power_candidate_iteration_limit=100,
power_candidate_reduction_factor=0.5,
voltage_iteration_limit=100,
voltage_tolerance=1e-2
) -> np.ndarray:
"""Get nodal voltage vector by solving with the fixed point algorithm.
- Initial DER power vector / node voltage vector must be a valid
          solution to the fixed-point equation, e.g., a previous solution from a past
operation point.
- Fixed point equation according to: <https://arxiv.org/pdf/1702.03310.pdf>
"""
# TODO: Add proper documentation.
# TODO: Validate fixed-point solution conditions.
# Debug message.
logger.debug("Starting fixed point solution algorithm...")
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
        # Obtain initial nodal power and voltage vectors, assuming no-load conditions (zero power, reference voltage).
# TODO: Enable passing previous solution for fixed-point initialization.
node_power_vector_wye_initial_no_source = np.zeros(node_power_vector_wye_no_source.shape, dtype=complex)
node_power_vector_delta_initial_no_source = np.zeros(node_power_vector_delta_no_source.shape, dtype=complex)
node_voltage_vector_initial_no_source = electric_grid_model.node_voltage_vector_reference_no_source.copy()
# Define nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Instantiate outer iteration variables.
is_final = False
outer_iteration = 0
# Outer iteration between power vector candidate selection and fixed point voltage solution algorithm
# until a final solution is found.
while (
~is_final
& (outer_iteration < outer_iteration_limit)
):
# Outer solution algorithm based on fixed-point solution conditions check.
# - Checks solution conditions and adjust power vector candidate if necessary, before solving for voltage.
if outer_solution_algorithm == 'check_conditions':
# Reset nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Check solution conditions for nodal power vector candidate.
is_final = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source
)
)
# Instantiate power candidate iteration variable.
power_candidate_iteration = 0
is_valid = is_final.copy()
# If solution conditions are violated, iteratively reduce power to find a power vector candidate
# which satisfies the solution conditions.
while (
~is_valid
& (power_candidate_iteration < power_candidate_iteration_limit)
):
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
node_power_vector_delta_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
is_valid = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source,
)
)
power_candidate_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if power_candidate_iteration >= power_candidate_iteration_limit:
logger.warning(
"Power vector candidate selection algorithm for fixed-point solution reached "
f"maximum limit of {power_candidate_iteration_limit} iterations."
)
# Store current candidate power vectors as initial power vectors
# for next round of computation of solution conditions.
node_power_vector_wye_initial_no_source = (
node_power_vector_wye_candidate_no_source.copy()
)
node_power_vector_delta_initial_no_source = (
node_power_vector_delta_candidate_no_source.copy()
)
# Instantiate fixed point iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate fixed point equation.
node_voltage_vector_estimate_no_source = (
np.transpose([electric_grid_model.node_voltage_vector_reference_no_source])
+ np.transpose([
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
(
np.conj(np.transpose([node_voltage_vector_initial_no_source])) ** -1
)
* np.conj(np.transpose([node_power_vector_wye_candidate_no_source]))
)
+ (
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
@ (
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(np.transpose([node_voltage_vector_initial_no_source]))
) ** -1
)
* np.conj(np.transpose([node_power_vector_delta_candidate_no_source]))
)
)
)
)
])
).ravel()
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage solution as initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Outer solution algorithm based on voltage solution check.
# - Checks if voltage solution exceeded iteration limit and adjusts power vector candidate if needed.
if outer_solution_algorithm == 'check_solution':
# If voltage solution exceeds iteration limit, reduce power and re-try voltage solution.
if voltage_iteration >= voltage_iteration_limit:
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor
# Reset initial nodal voltage vector.
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
                # Otherwise, if power has previously been reduced, increase the power again and re-try the voltage solution.
else:
if (
(node_power_vector_wye_candidate_no_source != node_power_vector_wye_no_source).any()
or (node_power_vector_delta_candidate_no_source != node_power_vector_delta_no_source).any()
):
# Increase nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor ** -1
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor ** -1
else:
is_final = True
            # For the fixed-point algorithm, reaching the iteration limit is considered undesired and triggers a warning.
elif voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Fixed point voltage solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Increment outer iteration counter.
outer_iteration += 1
# Reaching the outer iteration limit is considered undesired and triggers a warning.
if outer_iteration >= outer_iteration_limit:
logger.warning(
"Outer wrapper algorithm for fixed-point solution reached "
f"maximum limit of {outer_iteration_limit} iterations."
)
# Debug message.
logger.debug(
"Completed fixed point solution algorithm. "
f"Outer wrapper iterations: {outer_iteration}"
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
@staticmethod
def get_branch_power(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get branch power vectors by calculating power flow with given nodal voltage.
- Returns two branch power vectors, where `branch_power_vector_1` represents the
"from"-direction and `branch_power_vector_2` represents the "to"-direction.
"""
# Obtain branch admittance and incidence matrices.
branch_admittance_1_matrix = (
electric_grid_model.branch_admittance_1_matrix
)
branch_admittance_2_matrix = (
electric_grid_model.branch_admittance_2_matrix
)
branch_incidence_1_matrix = (
electric_grid_model.branch_incidence_1_matrix
)
branch_incidence_2_matrix = (
electric_grid_model.branch_incidence_2_matrix
)
# Calculate branch power vectors.
branch_power_vector_1 = (
(
branch_incidence_1_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_1_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
branch_power_vector_2 = (
(
branch_incidence_2_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_2_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
branch_power_vector_1 *= 3
branch_power_vector_2 *= 3
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get total electric losses with given nodal voltage."""
# Calculate total losses.
# TODO: Check if summing up branch power is faster.
# loss = (
# np.sum(
# branch_power_vector_1
# + branch_power_vector_2
# )
# )
loss = (
np.array([node_voltage_vector])
@ np.conj(electric_grid_model.node_admittance_matrix)
@ np.transpose([np.conj(node_voltage_vector)])
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
loss *= 3
return loss
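# Hedged usage sketch (not part of the MESMO API): demonstrates the fixed-point power flow solution above for the
# nominal DER power of a scenario. The `scenario_name` argument is an assumption and must refer to an existing
# MESMO scenario.
def _example_power_flow_solution_fixed_point(scenario_name: str):
    """Obtain a fixed-point power flow solution for nominal DER power and return the nodal voltage magnitudes
    in per unit of the reference voltage."""
    electric_grid_model = ElectricGridModelDefault(scenario_name)
    power_flow_solution = PowerFlowSolutionFixedPoint(electric_grid_model)
    return (
        np.abs(power_flow_solution.node_voltage_vector)
        / np.abs(electric_grid_model.node_voltage_vector_reference)
    )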
class PowerFlowSolutionZBus(PowerFlowSolutionFixedPoint):
"""Implicit Z-bus power flow solution object."""
# Overwrite `check_solution_conditions`, which is invalid for the Z-bus power flow.
@staticmethod
def check_solution_conditions(*args, **kwargs):
raise NotImplementedError("This method is invalid for the Z-bus power flow.")
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
voltage_iteration_limit=100,
voltage_tolerance=1e-2,
**kwargs
) -> np.ndarray:
"""Get nodal voltage vector by solving with the implicit Z-bus method."""
# Implicit Z-bus power flow solution (<NAME>).
# - “Can, Can, Lah!” (literal meaning, can accomplish)
# - <https://www.financialexpress.com/opinion/singapore-turns-50-the-remarkable-nation-that-can-lah/115775/>
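        # Restating the iteration implemented below in matrix notation (a paraphrase of the code):
        #     v_L <- Y_LL^-1 @ ( -Y_LS @ v_S + i(v_L) )
        # where Y_LL / Y_LS are the no-source / source-to-no-source admittance blocks, v_S the fixed source node
        # voltage and i(v_L) the nodal current injections of the wye / delta powers at the previous voltage iterate.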
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
# Obtain utility variables.
node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(electric_grid_model.node_admittance_matrix_no_source.tocsc())
)
node_admittance_matrix_source_to_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')
)]
)
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
# Instantiate implicit Z-bus power flow iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate current injections.
node_current_injection_delta_in_wye_no_source = (
electric_grid_model.node_transformation_matrix_no_source.transpose()
@ np.conj(
np.linalg.inv(np.diag((
electric_grid_model.node_transformation_matrix_no_source
@ node_voltage_vector_initial_no_source
).ravel()))
                    @ node_power_vector_delta_no_source
)
)
node_current_injection_wye_no_source = (
                np.conj(node_power_vector_wye_no_source)
/ np.conj(node_voltage_vector_initial_no_source)
)
node_current_injection_no_source = (
node_current_injection_delta_in_wye_no_source
+ node_current_injection_wye_no_source
)
# Calculate voltage.
node_voltage_vector_estimate_no_source = (
node_admittance_matrix_no_source_inverse @ (
- node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ node_current_injection_no_source
)
)
# node_voltage_vector_estimate_no_source = (
# electric_grid_model.node_voltage_vector_reference_no_source
# + node_admittance_matrix_no_source_inverse @ node_current_injection_no_source
# )
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage estimate as new initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Z-bus solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
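# Hedged usage sketch (not part of the MESMO API): cross-checks the implicit Z-bus solution above against the
# fixed-point solution for nominal DER power. The `scenario_name` argument is an assumption and must refer to an
# existing MESMO scenario; both solutions are expected to agree within the respective solver tolerances.
def _example_power_flow_solution_z_bus(scenario_name: str):
    """Return the maximum absolute nodal voltage deviation between the Z-bus and fixed-point solutions."""
    electric_grid_model = ElectricGridModelDefault(scenario_name)
    node_voltage_fixed_point = PowerFlowSolutionFixedPoint(electric_grid_model).node_voltage_vector
    node_voltage_z_bus = PowerFlowSolutionZBus(electric_grid_model).node_voltage_vector
    return np.max(np.abs(node_voltage_z_bus - node_voltage_fixed_point))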
class PowerFlowSolutionOpenDSS(PowerFlowSolution):
"""OpenDSS power flow solution object."""
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain `electric_grid_model`.
electric_grid_model = ElectricGridModelOpenDSS(scenario_name)
self.__init__(
electric_grid_model,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelOpenDSS,
**kwargs
):
# Obtain `der_power_vector`, assuming nominal power conditions.
der_power_vector = electric_grid_model.der_power_vector_reference
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelOpenDSS,
der_power_vector: np.ndarray,
**kwargs
):
# Store DER power vector.
self.der_power_vector = der_power_vector.ravel()
# Check if correct OpenDSS circuit is initialized, otherwise reinitialize.
if opendssdirect.Circuit.Name() != electric_grid_model.circuit_name:
electric_grid_model.__init__(electric_grid_model.electric_grid_data)
# Set DER power vector in OpenDSS model.
for der_index, der_name in enumerate(electric_grid_model.der_names):
# TODO: For OpenDSS, all DERs are assumed to be loads.
opendss_command_string = (
f"load.{der_name}.kw = {- np.real(self.der_power_vector[der_index]) / 1000.0}"
+ f"\nload.{der_name}.kvar = {- np.imag(self.der_power_vector[der_index]) / 1000.0}"
)
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Obtain voltage solution.
self.node_voltage_vector = (
self.get_voltage(
electric_grid_model
)
)
# Obtain branch flow solution.
(
self.branch_power_vector_1,
self.branch_power_vector_2
) = (
self.get_branch_power()
)
# Obtain loss solution.
self.loss = (
self.get_loss()
)
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelOpenDSS
):
"""Get nodal voltage vector by solving OpenDSS model.
        - The OpenDSS model must already be set up, with the desired power set for all DERs.
"""
# Create index for OpenDSS nodes.
opendss_nodes = pd.Series(opendssdirect.Circuit.AllNodeNames()).str.split('.', expand=True)
opendss_nodes.columns = ['node_name', 'phase']
opendss_nodes.loc[:, 'phase'] = opendss_nodes.loc[:, 'phase'].astype(int)
opendss_nodes = pd.MultiIndex.from_frame(opendss_nodes)
# Extract nodal voltage vector and reindex to match MESMO nodes order.
node_voltage_vector_solution = (
pd.Series(
(
np.array(opendssdirect.Circuit.AllBusVolts()[0::2])
+ 1j * np.array(opendssdirect.Circuit.AllBusVolts()[1::2])
),
index=opendss_nodes
).reindex(
electric_grid_model.nodes.droplevel('node_type')
).values
)
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
node_voltage_vector_solution /= np.sqrt(3)
return node_voltage_vector_solution
@staticmethod
def get_branch_power():
"""Get branch power vectors by solving OpenDSS model.
        - The OpenDSS model must already be set up, with the desired power set for all DERs.
"""
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Instantiate branch vectors.
branch_power_vector_1 = (
np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
)
branch_power_vector_2 = (
np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
)
# Instantiate iteration variables.
branch_vector_index = 0
line_index = opendssdirect.Lines.First()
# Obtain line branch power vectors.
while line_index > 0:
branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
branch_phase_count = opendssdirect.CktElement.NumPhases()
branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0:(branch_phase_count * 2):2]
+ 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
)
branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0 + (branch_phase_count * 2)::2]
+ 1.0j * branch_power_opendss[1 + (branch_phase_count * 2)::2]
)
branch_vector_index += 1
line_index = opendssdirect.Lines.Next()
# Obtain transformer branch power vectors.
transformer_index = opendssdirect.Transformers.First()
while transformer_index > 0:
branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
branch_phase_count = opendssdirect.CktElement.NumPhases()
skip_phase = 2 if 0 in opendssdirect.CktElement.NodeOrder() else 0 # Ignore ground nodes.
branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0:(branch_phase_count * 2):2]
+ 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
)
branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
                # Note: The slice end is computed as `len(branch_power_opendss) - skip_phase`, because a literal
                # `-skip_phase` end index would yield an empty slice for `skip_phase = 0`, i.e. for transformers
                # without ground nodes.
                branch_power_opendss[0 + (branch_phase_count * 2) + skip_phase:(len(branch_power_opendss) - skip_phase):2]
                + 1.0j * branch_power_opendss[1 + (branch_phase_count * 2) + skip_phase:(len(branch_power_opendss) - skip_phase):2]
)
branch_vector_index += 1
transformer_index = opendssdirect.Transformers.Next()
# Reshape branch power vectors to appropriate size and remove entries for nonexistent phases.
# TODO: Sort vector by branch name if not in order.
branch_power_vector_1 = branch_power_vector_1.flatten()
branch_power_vector_2 = branch_power_vector_2.flatten()
branch_power_vector_1 = branch_power_vector_1[~np.isnan(branch_power_vector_1)]
branch_power_vector_2 = branch_power_vector_2[~np.isnan(branch_power_vector_2)]
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss():
"""Get total loss by solving OpenDSS model.
        - The OpenDSS model must already be set up, with the desired power set for all DERs.
"""
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Obtain loss.
loss = opendssdirect.Circuit.Losses()[0] + 1.0j * opendssdirect.Circuit.Losses()[1]
return loss
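# Hedged usage sketch (not part of the MESMO API): runs a nominal-power snapshot power flow through OpenDSS via
# the solution object above. The `scenario_name` argument is an assumption and must refer to an existing MESMO
# scenario.
def _example_power_flow_solution_opendss(scenario_name: str):
    """Solve the OpenDSS power flow for nominal DER power and return the total complex loss, as obtained from
    `opendssdirect.Circuit.Losses()`."""
    electric_grid_model = ElectricGridModelOpenDSS(scenario_name)
    power_flow_solution = PowerFlowSolutionOpenDSS(electric_grid_model)
    return power_flow_solution.loss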
class PowerFlowSolutionSet(mesmo.utils.ObjectBase):
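    """Power flow solution set object, holding one power flow solution per timestep for a given DER power
    time series, based on a given electric grid model and power flow solution method."""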
power_flow_solutions: typing.Dict[pd.Timestamp, PowerFlowSolution]
electric_grid_model: ElectricGridModelDefault
der_power_vector: pd.DataFrame
timesteps: pd.Index
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_operation_results: ElectricGridDEROperationResults,
**kwargs
):
der_power_vector = (
der_operation_results.der_active_power_vector
+ 1.0j * der_operation_results.der_reactive_power_vector
)
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_power_vector: pd.DataFrame,
power_flow_solution_method=PowerFlowSolutionFixedPoint
):
# Store attributes.
self.electric_grid_model = electric_grid_model
self.der_power_vector = der_power_vector
self.timesteps = self.electric_grid_model.timesteps
# Obtain power flow solutions.
power_flow_solutions = (
mesmo.utils.starmap(
power_flow_solution_method,
zip(
itertools.repeat(self.electric_grid_model),
der_power_vector.values
)
)
)
self.power_flow_solutions = dict(zip(self.timesteps, power_flow_solutions))
def get_results(self) -> ElectricGridOperationResults:
# Instantiate results variables.
der_power_vector = (
pd.DataFrame(columns=self.electric_grid_model.ders, index=self.timesteps, dtype=complex)
)
node_voltage_vector = (
pd.DataFrame(columns=self.electric_grid_model.nodes, index=self.timesteps, dtype=complex)
)
branch_power_vector_1 = (
pd.DataFrame(columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex)
)
branch_power_vector_2 = (
pd.DataFrame(columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex)
)
loss = pd.DataFrame(columns=['total'], index=self.timesteps, dtype=complex)
# Obtain results.
for timestep in self.timesteps:
power_flow_solution = self.power_flow_solutions[timestep]
der_power_vector.loc[timestep, :] = power_flow_solution.der_power_vector
node_voltage_vector.loc[timestep, :] = power_flow_solution.node_voltage_vector
branch_power_vector_1.loc[timestep, :] = power_flow_solution.branch_power_vector_1
branch_power_vector_2.loc[timestep, :] = power_flow_solution.branch_power_vector_2
loss.loc[timestep, :] = power_flow_solution.loss
der_active_power_vector = der_power_vector.apply(np.real)
der_reactive_power_vector = der_power_vector.apply(np.imag)
node_voltage_magnitude_vector = np.abs(node_voltage_vector)
branch_power_magnitude_vector_1 = np.abs(branch_power_vector_1)
branch_power_magnitude_vector_2 = np.abs(branch_power_vector_2)
loss_active = loss.apply(np.real)
loss_reactive = loss.apply(np.imag)
# Obtain per-unit values.
der_active_power_vector_per_unit = (
der_active_power_vector
* mesmo.utils.get_inverse_with_zeros(np.real(self.electric_grid_model.der_power_vector_reference))
)
der_reactive_power_vector_per_unit = (
der_reactive_power_vector
* mesmo.utils.get_inverse_with_zeros(np.imag(self.electric_grid_model.der_power_vector_reference))
)
node_voltage_magnitude_vector_per_unit = (
node_voltage_magnitude_vector
* mesmo.utils.get_inverse_with_zeros(np.abs(self.electric_grid_model.node_voltage_vector_reference))
)
branch_power_magnitude_vector_1_per_unit = (
branch_power_magnitude_vector_1
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
branch_power_magnitude_vector_2_per_unit = (
branch_power_magnitude_vector_2
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
# Store results.
return ElectricGridOperationResults(
electric_grid_model=self.electric_grid_model,
der_active_power_vector=der_active_power_vector,
der_active_power_vector_per_unit=der_active_power_vector_per_unit,
der_reactive_power_vector=der_reactive_power_vector,
der_reactive_power_vector_per_unit=der_reactive_power_vector_per_unit,
node_voltage_magnitude_vector=node_voltage_magnitude_vector,
node_voltage_magnitude_vector_per_unit=node_voltage_magnitude_vector_per_unit,
branch_power_magnitude_vector_1=branch_power_magnitude_vector_1,
branch_power_magnitude_vector_1_per_unit=branch_power_magnitude_vector_1_per_unit,
branch_power_magnitude_vector_2=branch_power_magnitude_vector_2,
branch_power_magnitude_vector_2_per_unit=branch_power_magnitude_vector_2_per_unit,
loss_active=loss_active,
loss_reactive=loss_reactive
)
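# Hedged usage sketch (not part of the MESMO API): builds a power flow solution set from a constant nominal DER
# power time series and collects the results. The `scenario_name` argument is an assumption and must refer to an
# existing MESMO scenario; an actual DER operation schedule would replace the constant placeholder below.
def _example_power_flow_solution_set(scenario_name: str):
    """Solve one power flow per timestep of the scenario and return the collected operation results."""
    electric_grid_model = ElectricGridModelDefault(scenario_name)
    # Constant nominal DER power time series, as a placeholder for an actual DER operation schedule.
    der_power_vector = pd.DataFrame(
        [electric_grid_model.der_power_vector_reference] * len(electric_grid_model.timesteps),
        index=electric_grid_model.timesteps,
        columns=electric_grid_model.ders
    )
    power_flow_solution_set = PowerFlowSolutionSet(electric_grid_model, der_power_vector)
    return power_flow_solution_set.get_results()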
class LinearElectricGridModel(mesmo.utils.ObjectBase):
"""Abstract linear electric model object, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
Note:
This abstract class only defines the expected variables of linear electric grid model objects,
but does not implement any functionality.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
electric_grid_model: ElectricGridModelDefault
power_flow_solution: PowerFlowSolution
sensitivity_voltage_by_power_wye_active: sp.spmatrix
sensitivity_voltage_by_power_wye_reactive: sp.spmatrix
sensitivity_voltage_by_power_delta_active: sp.spmatrix
sensitivity_voltage_by_power_delta_reactive: sp.spmatrix
sensitivity_voltage_by_der_power_active: sp.spmatrix
sensitivity_voltage_by_der_power_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_voltage_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_voltage_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_der_power_active: sp.spmatrix
sensitivity_voltage_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_der_power_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_der_power_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_der_power_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_der_power_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_der_power_reactive: sp.spmatrix
sensitivity_loss_active_by_power_wye_active: sp.spmatrix
sensitivity_loss_active_by_power_wye_reactive: sp.spmatrix
sensitivity_loss_active_by_power_delta_active: sp.spmatrix
sensitivity_loss_active_by_power_delta_reactive: sp.spmatrix
sensitivity_loss_active_by_der_power_active: sp.spmatrix
sensitivity_loss_active_by_der_power_reactive: sp.spmatrix
sensitivity_loss_reactive_by_power_wye_active: sp.spmatrix
sensitivity_loss_reactive_by_power_wye_reactive: sp.spmatrix
sensitivity_loss_reactive_by_power_delta_active: sp.spmatrix
sensitivity_loss_reactive_by_power_delta_reactive: sp.spmatrix
sensitivity_loss_reactive_by_der_power_active: sp.spmatrix
sensitivity_loss_reactive_by_der_power_reactive: sp.spmatrix
class LinearElectricGridModelGlobal(LinearElectricGridModel):
"""Linear electric grid model object based on global approximations, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
:syntax:
- ``LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelGlobal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain electric grid model.
electric_grid_model = (
ElectricGridModelDefault(scenario_name)
)
# Obtain der power vector.
der_power_vector = (
electric_grid_model.der_power_vector_reference
)
# Obtain power flow solution.
power_flow_solution = (
PowerFlowSolutionFixedPoint(
electric_grid_model,
der_power_vector
)
)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution
):
# TODO: Validate linear model with delta DERs.
# Store power flow solution.
self.power_flow_solution = power_flow_solution
# Store electric grid model.
self.electric_grid_model = electric_grid_model
# Obtain shorthands for no-source matrices and vectors.
electric_grid_model.node_admittance_matrix_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
electric_grid_model.node_transformation_matrix_no_source = (
electric_grid_model.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
node_voltage_no_source = (
self.power_flow_solution.node_voltage_vector[
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
]
)
# Instantiate voltage sensitivity matrices.
self.sensitivity_voltage_by_power_wye_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_wye_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
# Calculate voltage sensitivity matrices.
# TODO: Document the change in sign in the reactive part compared to Hanif.
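        # Restating the expressions implemented below in matrix notation (a paraphrase of the code, with all
        # quantities referring to the no-source partition):
        #     dv/dp_wye   = Y_LL^-1 @ diag(conj(v))^-1
        #     dv/dq_wye   = (1j * Y_LL)^-1 @ diag(conj(v))^-1
        #     dv/dp_delta = Y_LL^-1 @ T^T @ diag(T @ conj(v))^-1
        #     dv/dq_delta = (1j * Y_LL)^-1 @ T^T @ diag(T @ conj(v))^-1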
self.sensitivity_voltage_by_power_wye_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source.tocsc(),
sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
)
)
self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
)
)
self.sensitivity_voltage_by_power_delta_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source.tocsc(),
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
)
@ sp.diags(
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(node_voltage_no_source)
) ** -1
).ravel()
)
)
self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
)
@ sp.diags(
(
(
electric_grid_model.node_transformation_matrix_no_source
                        @ np.conj(node_voltage_no_source)
) ** -1
).ravel()
)
)
self.sensitivity_voltage_by_der_power_active = (
self.sensitivity_voltage_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_by_der_power_reactive = (
self.sensitivity_voltage_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_voltage_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_voltage_magnitude_by_der_power_active = (
self.sensitivity_voltage_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_der_power_reactive = (
self.sensitivity_voltage_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate branch power sensitivity matrices.
sensitivity_branch_power_1_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_1_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_1_matrix
+ sp.diags((
electric_grid_model.branch_incidence_1_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_1_matrix
* np.sqrt(3)
)
sensitivity_branch_power_2_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_2_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_2_matrix
+ sp.diags((
electric_grid_model.branch_incidence_2_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_2_matrix
* np.sqrt(3)
)
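        # Calculate branch power magnitude sensitivity matrices.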
self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
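        # Calculate squared branch power sensitivity matrices.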
self.sensitivity_branch_power_1_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_der_power_active = (
self.sensitivity_branch_power_1_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
self.sensitivity_branch_power_1_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_active = (
self.sensitivity_branch_power_2_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
self.sensitivity_branch_power_2_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate loss sensitivity matrices.
# sensitivity_loss_by_voltage = (
# np.array([self.power_flow_solution.node_voltage_vector])
# @ np.conj(electric_grid_model.node_admittance_matrix)
# + np.transpose(
# electric_grid_model.node_admittance_matrix
# @ np.transpose([self.power_flow_solution.node_voltage_vector])
# )
# )
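        # - The loss sensitivity with respect to voltage is obtained by summing the branch power sensitivity
        #   matrices over all branches, since the total losses correspond to the sum of the branch power flows
        #   in both directions.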
sensitivity_loss_by_voltage = (
sum(np.transpose(
np.transpose(sensitivity_branch_power_1_by_voltage)
+ np.transpose(sensitivity_branch_power_2_by_voltage)
))
)
self.sensitivity_loss_active_by_power_wye_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_wye_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_reactive_by_power_wye_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_wye_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_active_by_der_power_active = (
self.sensitivity_loss_active_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_active_by_der_power_reactive = (
self.sensitivity_loss_active_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_active = (
self.sensitivity_loss_reactive_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_reactive = (
self.sensitivity_loss_reactive_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)


class LinearElectricGridModelLocal(LinearElectricGridModel):
"""Linear electric grid model object based on local approximations, consisting of the sensitivity matrices for
    voltage / voltage magnitude / branch power magnitude / squared branch power / active loss / reactive loss by
    changes in nodal wye power / nodal delta power.
:syntax:
- ``LinearElectricGridModelLocal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelLocal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain electric grid model.
electric_grid_model = (
ElectricGridModelDefault(scenario_name)
)
# Obtain der power vector.
der_power_vector = (
electric_grid_model.der_power_vector_reference
)
# Obtain power flow solution.
power_flow_solution = (
PowerFlowSolutionFixedPoint(
electric_grid_model,
der_power_vector
)
)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution
):
# Store power flow solution.
self.power_flow_solution = power_flow_solution
# Store electric grid model.
self.electric_grid_model = electric_grid_model
# Obtain shorthands for no-source matrices and vectors.
electric_grid_model.node_admittance_matrix_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
electric_grid_model.node_transformation_matrix_no_source = (
electric_grid_model.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
node_voltage_no_source = (
self.power_flow_solution.node_voltage_vector[
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
]
)
# Instantiate voltage sensitivity matrices.
self.sensitivity_voltage_by_power_wye_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_wye_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
# Calculate utility matrices.
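        # - A_matrix_inverse / A_matrix_conjugate and B_matrix are intermediate terms of the local (first-order)
        #   approximation of the nodal power flow equations around the given power flow solution.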
A_matrix_inverse = (
sp.diags((
electric_grid_model.node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ electric_grid_model.node_admittance_matrix_no_source
@ node_voltage_no_source
) ** -1)
)
A_matrix_conjugate = (
sp.diags(np.conj(
electric_grid_model.node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ electric_grid_model.node_admittance_matrix_no_source
@ node_voltage_no_source
))
)
B_matrix = (
A_matrix_conjugate
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ sp.diags(np.conj(node_voltage_no_source))
@ electric_grid_model.node_admittance_matrix_no_source
)
# Calculate voltage sensitivity matrices.
# - TODO: Consider delta loads.
self.sensitivity_voltage_by_power_wye_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
B_matrix.tocsc(),
(
sp.identity(len(node_voltage_no_source))
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ sp.identity(len(node_voltage_no_source))
).tocsc()
)
)
self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
B_matrix.tocsc(),
(
(1.0j * sp.identity(len(node_voltage_no_source)))
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ (-1.0j * sp.identity(len(node_voltage_no_source)))
).tocsc()
)
)
# self.sensitivity_voltage_by_power_delta_active[np.ix_(
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
# )] = (
# ???
# )
# self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
# )] = (
# ???
# )
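        # Calculate voltage sensitivity matrices with respect to DER power.
        # - Wye / delta power sensitivities are combined via the respective DER incidence matrices.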
self.sensitivity_voltage_by_der_power_active = (
self.sensitivity_voltage_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_by_der_power_reactive = (
self.sensitivity_voltage_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
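        # Calculate voltage magnitude sensitivity matrices.
        # - Following d|V| = Re(conj(V) dV) / |V|, evaluated at the reference power flow solution.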
self.sensitivity_voltage_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_voltage_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_voltage_magnitude_by_der_power_active = (
self.sensitivity_voltage_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_der_power_reactive = (
self.sensitivity_voltage_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate branch power sensitivity matrices.
sensitivity_branch_power_1_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_1_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_1_matrix
+ sp.diags((
electric_grid_model.branch_incidence_1_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_1_matrix
* np.sqrt(3)
)
sensitivity_branch_power_2_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_2_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_2_matrix
+ sp.diags((
electric_grid_model.branch_incidence_2_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_2_matrix
* np.sqrt(3)
)
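        # Calculate branch power magnitude sensitivity matrices.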
self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
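        # Calculate squared branch power sensitivity matrices.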
self.sensitivity_branch_power_1_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_der_power_active = (
self.sensitivity_branch_power_1_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
self.sensitivity_branch_power_1_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_active = (
self.sensitivity_branch_power_2_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
self.sensitivity_branch_power_2_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate loss sensitivity matrices.
# sensitivity_loss_by_voltage = (
# np.array([self.power_flow_solution.node_voltage_vector])
# @ np.conj(electric_grid_model.node_admittance_matrix)
# + np.transpose(
# electric_grid_model.node_admittance_matrix
# @ np.transpose([self.power_flow_solution.node_voltage_vector])
# )
# )
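        # - The loss sensitivity with respect to voltage is obtained by summing the branch power sensitivity
        #   matrices over all branches, since the total losses correspond to the sum of the branch power flows
        #   in both directions.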
sensitivity_loss_by_voltage = (
sum(np.transpose(
np.transpose(sensitivity_branch_power_1_by_voltage)
+ np.transpose(sensitivity_branch_power_2_by_voltage)
))
)
self.sensitivity_loss_active_by_power_wye_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_wye_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_reactive_by_power_wye_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_wye_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_active_by_der_power_active = (
self.sensitivity_loss_active_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_active_by_der_power_reactive = (
self.sensitivity_loss_active_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_active = (
self.sensitivity_loss_reactive_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_reactive = (
self.sensitivity_loss_reactive_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)


class LinearElectricGridModelSet(mesmo.utils.ObjectBase):
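    """Linear electric grid model set object, collecting one linear electric grid model per timestep of the given
    electric grid model, e.g. for defining multi-timestep optimization problems.
    """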
linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
electric_grid_model: ElectricGridModelDefault
timesteps: pd.Index
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid model & reference power flow solution.
electric_grid_model = ElectricGridModelDefault(scenario_name)
power_flow_solution = PowerFlowSolutionFixedPoint(electric_grid_model)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution,
linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelGlobal
):
self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
# Obtain linear electric grid models.
linear_electric_grid_model = linear_electric_grid_model_method(electric_grid_model, power_flow_solution)
linear_electric_grid_models = (
dict(zip(electric_grid_model.timesteps, itertools.repeat(linear_electric_grid_model)))
)
self.__init__(
electric_grid_model,
linear_electric_grid_models
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution_set: PowerFlowSolutionSet,
linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelLocal
):
self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
# Obtain linear electric grid models.
linear_electric_grid_models = (
mesmo.utils.starmap(
linear_electric_grid_model_method,
zip(
itertools.repeat(electric_grid_model),
power_flow_solution_set.power_flow_solutions.values()
)
)
)
linear_electric_grid_models = (
dict(zip(electric_grid_model.timesteps, linear_electric_grid_models))
)
self.__init__(
electric_grid_model,
linear_electric_grid_models
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
):
# Store attributes.
self.electric_grid_model = electric_grid_model
self.timesteps = self.electric_grid_model.timesteps
self.linear_electric_grid_models = linear_electric_grid_models
@staticmethod
def check_linear_electric_grid_model_method(linear_electric_grid_model_method):
if not issubclass(linear_electric_grid_model_method, LinearElectricGridModel):
raise ValueError(f"Invalid linear electric grid model method: {linear_electric_grid_model_method}")
def define_optimization_problem(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None,
**kwargs
):
        # Define optimization problem elements through the respective sub-methods.
self.define_optimization_variables(optimization_problem, scenarios=scenarios)
self.define_optimization_parameters(
optimization_problem,
price_data,
scenarios=scenarios,
**kwargs
)
self.define_optimization_constraints(optimization_problem, scenarios=scenarios)
self.define_optimization_objective(optimization_problem, scenarios=scenarios)
def define_optimization_variables(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
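        # Note: DER power, voltage magnitude and branch power variables appear to be defined as per-unit values
        # relative to the respective reference vectors (cf. the scaling in `define_optimization_parameters()`).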
# Define DER power vector variables.
optimization_problem.define_variable(
'der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)
optimization_problem.define_variable(
'der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)
# Define node voltage magnitude variable.
optimization_problem.define_variable(
'node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)
# Define branch power magnitude variables.
optimization_problem.define_variable(
'branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)
optimization_problem.define_variable(
'branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)
# Define loss variables.
optimization_problem.define_variable(
'loss_active', scenario=scenarios, timestep=self.timesteps
)
optimization_problem.define_variable(
'loss_reactive', scenario=scenarios, timestep=self.timesteps
)
def define_optimization_parameters(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
node_voltage_magnitude_vector_minimum: np.ndarray = None,
node_voltage_magnitude_vector_maximum: np.ndarray = None,
branch_power_magnitude_vector_maximum: np.ndarray = None,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Obtain timestep interval in hours, for conversion of power to energy.
timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / pd.Timedelta('1h')
# Define voltage variable terms.
optimization_problem.define_parameter(
'voltage_active_term',
sp.block_diag([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'voltage_reactive_term',
sp.block_diag([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define voltage constant term.
optimization_problem.define_parameter(
'voltage_constant',
np.concatenate([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
- linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 1) variable terms.
optimization_problem.define_parameter(
'branch_power_1_active_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'branch_power_1_reactive_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
        # Define branch flow (direction 1) constant term.
optimization_problem.define_parameter(
'branch_power_1_constant',
np.concatenate([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
- linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 2) variable terms.
optimization_problem.define_parameter(
'branch_power_2_active_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'branch_power_2_reactive_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 2) constant term.
optimization_problem.define_parameter(
'branch_power_2_constant',
np.concatenate([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
- linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define active loss variable terms.
optimization_problem.define_parameter(
'loss_active_active_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'loss_active_reactive_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define active loss constant term.
optimization_problem.define_parameter(
'loss_active_constant',
np.concatenate([
np.real(linear_electric_grid_model.power_flow_solution.loss)
- linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define reactive loss variable terms.
optimization_problem.define_parameter(
'loss_reactive_active_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'loss_reactive_reactive_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
        # Define reactive loss constant term.
optimization_problem.define_parameter(
'loss_reactive_constant',
np.concatenate([
np.imag(linear_electric_grid_model.power_flow_solution.loss)
- linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define voltage limits.
optimization_problem.define_parameter(
'voltage_limit_minimum',
np.concatenate([
node_voltage_magnitude_vector_minimum.ravel()
/ np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if node_voltage_magnitude_vector_minimum is not None
else -np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
)
optimization_problem.define_parameter(
'voltage_limit_maximum',
np.concatenate([
node_voltage_magnitude_vector_maximum.ravel()
/ np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if node_voltage_magnitude_vector_maximum is not None
else +np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
)
# Define branch flow limits.
optimization_problem.define_parameter(
'branch_power_minimum',
np.concatenate([
- branch_power_magnitude_vector_maximum.ravel()
/ linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if branch_power_magnitude_vector_maximum is not None
else -np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
)
optimization_problem.define_parameter(
'branch_power_maximum',
np.concatenate([
branch_power_magnitude_vector_maximum.ravel()
/ linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if branch_power_magnitude_vector_maximum is not None
else +np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
)
# Define objective parameters.
optimization_problem.define_parameter(
'electric_grid_active_power_cost',
np.array([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
* -1.0 * timestep_interval_hours # In Wh.
@ sp.block_diag(
[np.array([np.real(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
)
)
optimization_problem.define_parameter(
'electric_grid_active_power_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
* np.concatenate([np.real(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
)
optimization_problem.define_parameter(
'electric_grid_reactive_power_cost',
np.array([price_data.price_timeseries.loc[:, ('reactive_power', 'source', 'source')].values])
* -1.0 * timestep_interval_hours # In Wh.
@ sp.block_diag(
[np.array([np.imag(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
)
)
optimization_problem.define_parameter(
'electric_grid_reactive_power_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
* np.concatenate([np.imag(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
)
optimization_problem.define_parameter(
'electric_grid_loss_active_cost',
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values
* timestep_interval_hours # In Wh.
)
optimization_problem.define_parameter(
'electric_grid_loss_active_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
)
def define_optimization_constraints(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
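        # The following equality constraints encode the linearized grid model equations, expressing voltage
        # magnitude, branch power magnitudes and losses as affine functions of the DER active / reactive power vectors.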
# Define voltage equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'==',
('variable', 'voltage_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'voltage_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'voltage_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define branch flow (direction 1) equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'==',
('variable', 'branch_power_1_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'branch_power_1_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'branch_power_1_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define branch flow (direction 2) equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'==',
('variable', 'branch_power_2_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'branch_power_2_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'branch_power_2_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define active loss equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='loss_active', scenario=scenarios, timestep=self.timesteps)),
'==',
('variable', 'loss_active_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'loss_active_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'loss_active_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define reactive loss equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='loss_reactive', scenario=scenarios, timestep=self.timesteps)),
'==',
('variable', 'loss_reactive_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'loss_reactive_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'loss_reactive_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define voltage limits.
# Add dedicated keys to enable retrieving dual variables.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'>=',
('constant', 'voltage_limit_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='voltage_magnitude_vector_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'<=',
('constant', 'voltage_limit_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='voltage_magnitude_vector_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
),
broadcast='scenario'
)
# Define branch flow limits.
# Add dedicated keys to enable retrieving dual variables.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'>=',
('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_1_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'<=',
('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_1_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'>=',
('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_2_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'<=',
('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_2_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
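    # Note on the constraint syntax used above: each tuple passed to
    # `define_constraint()` is either a variable term
    # ('variable', factor_or_parameter_name, dict(name=..., ...)) or a constant term
    # ('constant', parameter_name, dict(...)); string factors such as
    # 'voltage_active_term' refer to parameters registered in
    # `define_optimization_parameters()`. A minimal hypothetical call, for
    # illustration only (names are placeholders, not part of this module):
    #
    #     optimization_problem.define_constraint(
    #         ('variable', 1.0, dict(name='some_state_vector', timestep=self.timesteps)),
    #         '<=',
    #         ('constant', 'some_limit_parameter', dict(timestep=self.timesteps))
    #     )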
def define_optimization_objective(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Set objective flag.
optimization_problem.flags['has_electric_grid_objective'] = True
# Define objective for electric loads.
# - Defined as cost of electric supply at electric grid source node.
# - Only defined here, if not yet defined as cost of electric power supply at the DER node
# in `mesmo.der_models.DERModel.define_optimization_objective`.
if not optimization_problem.flags.get('has_der_objective'):
# Active power cost / revenue.
# - Cost for load / demand, revenue for generation / supply.
optimization_problem.define_objective(
('variable', 'electric_grid_active_power_cost', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'electric_grid_active_power_cost_sensitivity', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
), dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
broadcast='scenario'
)
# Reactive power cost / revenue.
# - Cost for load / demand, revenue for generation / supply.
optimization_problem.define_objective(
('variable', 'electric_grid_reactive_power_cost', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'electric_grid_reactive_power_cost_sensitivity', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
), dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
broadcast='scenario'
)
# Define active loss cost.
optimization_problem.define_objective(
('variable', 'electric_grid_loss_active_cost', dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
)),
('variable', 'electric_grid_loss_active_cost_sensitivity', dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
), dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
)),
broadcast='scenario'
)
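    # Note on the objective syntax above: terms with a single variable dict appear to
    # contribute a linear cost (parameter row vector times the variable vector), while
    # terms carrying two variable dicts (the '..._sensitivity' parameters) appear to
    # contribute a quadratic cost. This reading is inferred from the squared reference
    # vectors in `define_optimization_parameters()` above, not from separate
    # documentation of `define_objective()`.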
def evaluate_optimization_objective(
self,
results: ElectricGridOperationResults,
price_data: mesmo.data_interface.PriceData
) -> float:
# Instantiate optimization problem.
optimization_problem = mesmo.utils.OptimizationProblem()
self.define_optimization_parameters(optimization_problem, price_data)
self.define_optimization_variables(optimization_problem)
self.define_optimization_objective(optimization_problem)
# Instantiate variable vector.
x_vector = np.zeros((len(optimization_problem.variables), 1))
# Set variable vector values.
objective_variable_names = [
'der_active_power_vector_per_unit',
'der_reactive_power_vector_per_unit',
'loss_active'
]
for variable_name in objective_variable_names:
index = mesmo.utils.get_index(optimization_problem.variables, name=variable_name.replace('_per_unit', ''))
x_vector[index, 0] = results[variable_name].values.ravel()
# Obtain objective value.
objective = optimization_problem.evaluate_objective(x_vector)
return objective
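    # Illustrative (hypothetical) usage, assuming `results` is an
    # `ElectricGridOperationResults` object from a solved problem and `price_data` a
    # `mesmo.data_interface.PriceData` instance; the receiver name `model_set` is a
    # placeholder, not part of this module:
    #
    #     objective_value = model_set.evaluate_optimization_objective(results, price_data)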
def get_optimization_dlmps(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None
) -> ElectricGridDLMPResults:
# Obtain results index sets, depending on if / if not scenarios given.
if scenarios in [None, [None]]:
scenarios = [None]
ders = self.electric_grid_model.ders
nodes = self.electric_grid_model.nodes
branches = self.electric_grid_model.branches
else:
ders = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.ders.to_flat_index()),
names=['scenario', 'der']
)
)
nodes = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.nodes.to_flat_index()),
names=['scenario', 'node']
)
)
branches = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.branches.to_flat_index()),
names=['scenario', 'branch']
)
)
# Obtain individual duals.
voltage_magnitude_vector_minimum_dual = (
optimization_problem.duals['voltage_magnitude_vector_minimum_constraint'].loc[
self.electric_grid_model.timesteps, nodes
]
/ np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
voltage_magnitude_vector_maximum_dual = (
-1.0 * optimization_problem.duals['voltage_magnitude_vector_maximum_constraint'].loc[
self.electric_grid_model.timesteps, nodes
]
/ np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
branch_power_magnitude_vector_1_minimum_dual = (
optimization_problem.duals['branch_power_magnitude_vector_1_minimum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_1_maximum_dual = (
-1.0 * optimization_problem.duals['branch_power_magnitude_vector_1_maximum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_minimum_dual = (
optimization_problem.duals['branch_power_magnitude_vector_2_minimum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_maximum_dual = (
-1.0 * optimization_problem.duals['branch_power_magnitude_vector_2_maximum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
# Instantiate DLMP variables.
# TODO: Consider delta connections in nodal DLMPs.
# TODO: Consider single-phase DLMPs.
electric_grid_energy_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_voltage_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_congestion_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_loss_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_energy_dlmp_node_reactive_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
import os
import argparse
import numpy as np
import pandas as pd
import nibabel as nib
from ukbb_cardiac.common.cardiac_utils import get_frames
from ukbb_cardiac.common.image_utils import np_categorical_dice
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--output_csv', metavar='csv_name', default='DM_table.csv',
                        help='Output CSV file for the Dice metric table.')
args = parser.parse_args()
print('Creating accuracy spreadsheet file ...')
if os.path.exists(args.output_csv):
os.remove(args.output_csv)
    # Table of Dice metrics at the ED and ES frames (LV cavity, LV myocardium, RV), written to csv below
init = {'Data': [],
'EDLV': [],
'EDLVM': [],
'EDRV': [],
'ESLV': [],
'ESLVM': [],
'ESRV': [],
}
    df = pd.DataFrame(init)
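    # Hypothetical continuation, for illustration only: each subject would typically
    # append one row to `init` (one Dice value per structure at the ED and ES frames,
    # e.g. computed with np_categorical_dice on the automated and manual segmentations)
    # before rebuilding the DataFrame and writing it out with
    # df.to_csv(args.output_csv, index=False). The subject loop and file layout are not
    # shown in this excerpt, so they are assumptions here.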
# --------------
# import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df = pd.read_json(path,lines=True)
df.columns=df.columns.str.strip().str.lower().str.replace(' ', '_')
##df.dropna(inplace=True)
df.drop(columns=['waist', 'bust', 'user_name','review_text','review_summary','shoe_size','shoe_width'],axis=1,inplace=True)
print(df.columns)
y=df['fit']
X=df.drop('fit',axis=1)
print(X.columns)
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.33,random_state=6)
print(y_train.shape)
print(y_test.shape)
# Code ends here
# Code ends here
# --------------
def plot_barh(df,col, cmap = None, stacked=False, norm = None):
df.plot(kind='barh', colormap=cmap, stacked=stacked)
fig = plt.gcf()
fig.set_size_inches(24,12)
plt.title("Category vs {}-feedback - cloth {}".format(col, '(Normalized)' if norm else ''), fontsize= 20)
plt.ylabel('Category', fontsize = 18)
plot = plt.xlabel('Frequency', fontsize=18)
# Code starts here
g_by_category = df.groupby('category')
cat_fit=g_by_category.count()['fit']
# Code ends here
# --------------
# Code starts here
g_by_category = df.groupby('category')
cat_len = g_by_category['length'].value_counts()
print(cat_len)
cat_len = cat_len.unstack()
print(cat_len)
plot_barh(cat_len, 'length')
# Code ends here
# --------------
# Code starts here
def get_cms(x):
    if isinstance(x, float):  # missing heights arrive as float NaN
return
try:
return (int(x[0])*30.48) + (int(x[4:-2])*2.54)
except:
return (int(x[0])*30.48)
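# For illustration, assuming heights are stored as strings like "5ft 6in":
# get_cms("5ft 6in") -> 5 * 30.48 + 6 * 2.54 = 167.64 cm, while a missing value
# (float NaN) is caught by the early return above.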
print(X_train['height'].isnull().sum())
###X_train['height'].dropna(inplace=True)
##for i in range(len(X_train)):
## X_train['height'][i]= (int(X_train['height'][i][0])*30.48) + (int(X_train['height'][i][4:-2])*2.54)
X_train['height']=X_train['height'].apply(get_cms)
X_test['height']=X_test['height'].apply(get_cms)
# Code ends here
# --------------
# Code starts here
print(X_train.isnull().sum())
X_train.dropna(subset=['height','length','quality'],inplace=True)
X_test.dropna(subset=['height','length','quality'],inplace=True)
X_train['bra_size'].fillna((X_train['bra_size'].mean()), inplace=True)
X_test['bra_size'].fillna((X_test['bra_size'].mean()), inplace=True)
mode_1=X_train['cup_size'].mode()[0]
mode_2=X_test['cup_size'].mode()[0]
X_train['cup_size']=X_train['cup_size'].replace(np.nan,mode_1)
X_test['cup_size']=X_test['cup_size'].replace(np.nan,mode_2)
# Code ends here
# --------------
print(X_test.head(2))
X_train=pd.get_dummies(data=X_train,columns=["category", "cup_size","length"],prefix=["category", "cup_size","length"])
X_test=pd.get_dummies(data=X_test,columns=["category", "cup_size","length"],prefix=["category", "cup_size","length"])
"""
PRESSGRAPHS DASH CLIENT
WEB GUI interface for PressGraphs WebAPI
"""
###################################
# IMPORTS
###################################
#builtins
from datetime import datetime
from datetime import timedelta
#3rd party
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_table as dt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
from dash.dependencies import Input, Output, State
#oww
from md import md_txt
###################################
# DEFINITIONS
###################################
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CERULEAN])
app.title = 'Press Graphs'
app.config.suppress_callback_exceptions = True
server = app.server
startup_time = datetime.now().strftime("%Y %m %d %H:%M")
API_KEY = "" # register your own API key at http://pressgraphs.pythonanywhere.com/create/test_user
MAX_REQUEST_DAY = 90
def build_layout():
"""
def to serve app.layout every time the app loads
"""
layout = html.Div(style={"padding":"2vw"},
children=[dcc.Location(id='url', refresh=True),
dbc.Nav([
dbc.NavItem(dbc.NavLink("kezdőlap", active=True, href="/")),
dbc.NavItem(dbc.NavLink("dátum szerint", href="/all_date")),
dbc.NavItem(dbc.NavLink("újságok szerint", href="/all_org")),
dbc.NavItem(dbc.NavLink("újság szerint", href="/site_tab")),
dbc.NavItem(dbc.NavLink("két újság összevetése", href="/site_vs_tab")),
dbc.NavItem(dbc.NavLink("két szó összevetése", href="words_tab")),
dbc.DropdownMenu(
[dbc.DropdownMenuItem("újságok", href="mo"),
dbc.DropdownMenuItem("útmutató", href ="manual"),
dbc.DropdownMenuItem("elérhetőség", href="contact")],
label="további info",
nav=True)]),
html.Hr(),
html.Div(id='page-content'),
html.Hr()])
return layout
def md_linkler(url: str) ->str:
"""
transforms url to markdown type link
"""
md_link = f"[link]({url})"
return md_link
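# For illustration: md_linkler("https://example.com/cikk") returns
# "[link](https://example.com/cikk)", which dash_table renders as a clickable link
# because the column is declared with presentation='markdown' below.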
def update_dt_by_date(dataframe: pd.DataFrame()) -> dt.DataTable():
"""
updates dash_table with passed dataframe
returns dash_table
"""
dataframe["link"] = dataframe["url"].copy()
dataframe["link"] = dataframe["link"].apply(md_linkler)
columns = [{'name': 'dátum', 'id':'date'},
{'name': 'oldal', 'id':'site'},
{'name': 'cím', 'id':'title'},
{'name': 'link', 'id':'link', 'type':'text', 'presentation': 'markdown'},
{'name': 'url', 'id':'url'}]
data = dataframe.to_dict('records')
data_table = dt.DataTable(
style_table={"padding": "50px", "maxHeight": '350px',
"overflowY": "scroll"},
style_data={'whiteSpace': 'normal', 'height': 'auto'},
style_cell={'textAlign': 'left'},
style_cell_conditional=[
{'if': {'column_id': 'date'}, 'width': '30px'},
{'if': {'column_id': 'site'}, 'width': '30px'},
{'if': {'column_id': 'title'}, 'width': '250px'},
{'if': {'column_id': 'link'}, 'width': '30px'},
{'if': {'column_id': 'url'}, 'width': '100px'}],
data=data,
columns=columns,
page_size=50,
export_format="xlsx")
return data_table
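# Minimal illustrative call (column names must match those used above; the sample
# values are made up):
#
#     sample = pd.DataFrame([{"date": "2021-01-01", "site": "example.hu",
#                             "title": "Example title", "url": "https://example.hu/cikk"}])
#     table = update_dt_by_date(sample)  # returns a dash_table.DataTable component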
def plot_all_by_date(*, dataframe: pd.DataFrame(), search_word: str) -> px.bar:
"""
    :param dataframe: pd.DataFrame of daily article counts, indexed by date
    :param search_word: the searched word, used in the chart title
    :returns: plotly.express bar figure (empty if the dataframe is empty)
"""
if len(dataframe) > 0:
dataframe.columns = ["találatok száma"]
fig = px.bar(dataframe,
height=500,
x=dataframe.index,
y="találatok száma",
color="találatok száma",
labels={"x": "dátum", "date": "cikkek száma"},
opacity=.75,
color_continuous_scale="Geyser"
)
fig.update_layout(
title={'text': f"""A '{search_word}' szó száma a cikkek címeiben
{dataframe.index.min()}--{dataframe.index.max()}.""",
'y': 0.900,
'x': 0.50},
xaxis_title="Dátum",
yaxis_title="Cikkek száma",
yaxis_tickformat = 'd',
transition={'duration': 500},
plot_bgcolor="rgba(0,0,0,0)",
font={"family":"Courier New, monospace",
"size":11,
"color":"#000000"
})
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=True, gridcolor = '#bdbdbd')
if len(dataframe) < 5:
fig.update_layout(xaxis_showticklabels = False, width=750)
fig.update_yaxes(showgrid=False, dtick=1)
return fig
return px.bar()
def plot_all_by_sites(*, dataframe: pd.DataFrame(), search_word: str):
"""
    Horizontal bar chart of per-site hit counts over the last 90 days.
"""
if len(dataframe) > 0:
df = dataframe
df.rename(columns={'title': 'darab'}, inplace=True)
fig = px.bar(df,
height=1500,
orientation='h',
x="darab",
y=df.index,
labels={"y": "orgánum", "x": "cikkek száma"},
opacity=.75,
)
fig.update_layout(
title={'text': "Találatok az elmúlt 90 napból"},
plot_bgcolor="rgba(0,0,0,0)",
yaxis_title="Újságok",
xaxis_title="Cikkek száma",
font={
"family":"Courier New, monospace",
"size":10,
"color":"#000000"
})
fig.update_traces(marker_color='black')
fig.update_xaxes(showgrid=True, gridcolor='#bdbdbd')
fig.update_yaxes(showgrid=False)
return fig
return px.bar()
def compare_two_sites(*,
search_word,
site1_df,
site2_df,
site_1,
site_2):
"""
    Line chart comparing the daily hit counts of the search word on two sites,
    annotated with their correlation coefficient.
"""
if search_word:
search_word = str(search_word).lower()
site_corr = site1_df["count"].corr(site2_df["count"])
fig = go.Figure(
layout=go.Layout(
annotations=[go.layout.Annotation(
text=f'Korrelációs együttható (r): {site_corr:.2f}',
hovertext="""Tartomány: -1 és 1 között. Jelzi két tetszőleges érték közötti lineáris kapcsolat nagyságát és irányát.""",
borderpad=1,
bgcolor="#ffffcc",
align='left',
showarrow=False,
xref='paper',
yref='paper',
x=0,
y=1,
bordercolor='grey',
borderwidth=1)]))
fig.add_trace(go.Scatter(x=site1_df.index, y=site1_df["count"],
mode='lines',
line_shape='linear',
name=f'{site_1}'))
fig.add_trace(go.Scatter(x=site2_df.index, y=site2_df["count"],
mode='lines',
line_shape='linear',
name=f'{site_2}'))
fig.update_layout(
title=f"""'{site_1}' és '{site_2}': '{search_word}' szó száma a cikkek címeiben""",
xaxis_title="Dátum",
yaxis_title="Cikkek száma",
plot_bgcolor="rgba(0,0,0,0)",
)
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=True, gridcolor='#bdbdbd')
return fig
return px.bar()
def compare_two_search_words(*,
sw_df_1,
sw_df_2,
search_word_1,
search_word_2):
"""
    Line chart comparing the daily counts of two search words across all sites,
    annotated with their correlation coefficient.
"""
if search_word_1:
sw1 = search_word_1.split()[0].strip()
sw2 = search_word_2.split()[0].strip()
corr = sw_df_1["count"].corr(sw_df_2["count"])
fig = go.Figure(
layout=go.Layout(
annotations=[go.layout.Annotation(
text=f'Korrelációs együttható (r): {corr:.2f}',
hovertext="""Tartomány: -1 és 1 között.""",
borderpad=1,
bgcolor="#ffffcc",
align='left',
showarrow=False,
xref='paper',
yref='paper',
x=0,
y=1,
bordercolor='grey',
borderwidth=1)]))
fig.add_trace(go.Scatter(x=sw_df_1.index, y=sw_df_1["count"],
mode='lines',
line_shape='linear',
name=f'{sw1}'))
fig.add_trace(go.Scatter(x=sw_df_2.index, y=sw_df_2["count"],
mode='lines',
line_shape='linear',
name=f'{sw2}'))
fig.update_layout(
height=600,
title={'text': f"'{sw1}' és '{sw2}' szavak száma a cikkek címeiben",
'y':0.90,
'x':0.5},
xaxis_title="Dátum",
yaxis_title="Cikkek száma",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(
family="Courier New, monospace",
size=11,
color="#000000"
))
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=True, gridcolor='#bdbdbd')
return fig
return px.bar()
###################################
# LAYOUT
###################################
print("loading layout")
app.layout = build_layout
@app.callback(
Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/all_date':
return page_1_layout
elif pathname == '/all_org':
return page_2_layout
elif pathname == '/site_tab':
return page_3_layout
elif pathname == '/site_vs_tab':
return page_4_layout
elif pathname == '/words_tab':
return page_5_layout
elif pathname == '/contact':
return page_6_layout
elif pathname == '/manual':
return page_7_layout
elif pathname == '/mo':
return page_8_layout
else:
return index_page
###################################
# INDEX
###################################
index_page = html.Div([
dcc.Markdown(children=md_txt.index_txt)])
###################################
# PAGE 1 LAYOUT
###################################
page_1_layout = html.Div([
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value="")), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
dcc.Graph(id='max_date_bargraph'),
html.Div(id="table1", style={'font-family': 'Impact'})])
###################################
# PAGE 1 CHART CALLBACK
###################################
@app.callback(Output('max_date_bargraph', 'figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def date_count_all_site(n_clicks, n_submit, switch_value, search_word):
"""
"""
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
site="all"
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/count/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
res_df = pd.DataFrame(content)
if len(res_df) > 0:
res_df.set_index("date", inplace=True)
else:
res_df = pd.DataFrame()
fig = plot_all_by_date(dataframe=res_df, search_word=search_word)
return fig
###################################
# PAGE 1 DATA TABLE CALLBACK
###################################
@app.callback(Output('table1', 'children'),
[Input('max_date_bargraph', 'clickData'),
Input('submit-button', 'n_clicks'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def update_table(clickData, n_clicks, switch_value, search_word):
"""
    Lists the articles behind the clicked date bar and renders them as a data table.
"""
if clickData:
search_word = search_word.strip()
date = list(clickData["points"])[0]["label"]
site = "all"
if switch_value:
switch_value = 1
else:
switch_value = 0
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{date}/{date}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
df = pd.DataFrame(content)
return update_dt_by_date(df)
else:
return
###################################
# PAGE 2 LAYOUT
###################################
page_2_layout = html.Div([
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value="")), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
html.Div(id='my-output'),
dcc.Graph(id='bargraph_2'),
html.Div(id="table2", style={'font-family': 'Impact'})])
###################################
# PAGE 2 CHART CALLBACK
###################################
@app.callback(Output('bargraph_2', 'figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def update_by_site(n_clicks, n_submit, switch_value, search_word):
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
site="all"
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
res_df = pd.DataFrame(content)
df = res_df.groupby(by="site").count()["title"]
df = pd.DataFrame(df.sort_values(ascending=True)[:])
else:
df = pd.DataFrame()
fig = plot_all_by_sites(dataframe=df, search_word=search_word)
return fig
###################################
# PAGE 2 DATA TABLE CALLBACK
###################################
@app.callback(Output('table2', 'children'),
[Input('bargraph_2', 'clickData'),
Input('submit-button', 'n_clicks'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def display_clickData_2(clickData, n_clicks, switch_value, search_word):
if clickData:
search_word = search_word.strip()
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
site = list(clickData["points"])[0]["label"]
if switch_value:
switch_value = 1
else:
switch_value = 0
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
df = pd.DataFrame(content)
return update_dt_by_date(df)
else:
return
###################################
# PAGE 3 LAYOUT
###################################
api_url = f"""http://pressgraphs.pythonanywhere.com/{API_KEY}/info/sites/all"""
response = requests.get(api_url)
schema = response.json()[0]
st_options = pd.DataFrame(response.json()[1]["data"])
page_3_layout = html.Div([
html.H5("oldal szerinti keresés"),
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value='')), width=3)),
html.Br(),
dbc.Row(dbc.Col(html.Div(dcc.Dropdown(
id="sites",
options=[{
'label': i,
'value': i
} for i in st_options["site"]],
placeholder="keresett oldal...",
value='')), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
dcc.Graph(id='bargraph_3'),
html.Div(id="table3")])
###################################
# PAGE 3 CHART CALLBACK
###################################
@app.callback(Output('bargraph_3','figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value'),
State('sites', 'value')])
def update_site_graph(n_clicks, n_submit, switch_value, search_word, site):
"""
"""
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
site=site
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/count/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
res_df = pd.DataFrame(content)
if len(res_df) > 0:
res_df.set_index("date",inplace=True)
else:
res_df = pd.DataFrame()
fig = plot_all_by_date(dataframe=res_df,
search_word=search_word)
return fig
###################################
# PAGE 3 DATA TABLE CALLBACK
###################################
@app.callback(Output('table3', 'children'),
[Input('bargraph_3', 'clickData'),
Input('submit-button', 'n_clicks'),
Input('switch-input', 'value')],
[State('search_input', 'value'),
State('sites', 'value')])
def display_clickData_3(clickData, n_clicks, switch_value, search_word, site):
"""
    Lists the articles of the selected site for the clicked date bar and renders them
    as a data table.
"""
if clickData:
search_word = search_word.strip()
date = list(clickData["points"])[0]["label"]
if switch_value:
switch_value = 1
else:
switch_value = 0
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{date}/{date}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
df = pd.DataFrame(content)
return update_dt_by_date(df)
else:
return
###################################
# PAGE 4 LAYOUT
###################################
api_url = f"""http://pressgraphs.pythonanywhere.com/{API_KEY}/info/sites/all"""
response = requests.get(api_url)
schema = response.json()[0]
st_options = pd.DataFrame(response.json()[1]["data"])
page_4_layout = html.Div([
html.H5("két oldal összevetése"),
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value='')),width=3)),
html.Br(),
dbc.Row(dbc.Col(html.Div(dcc.Dropdown(
id="site_1",
options=[{
'label': i,
'value': i
} for i in st_options["site"]],
placeholder="első oldal...",
value='')), width=3)),
html.Br(),
dbc.Row(dbc.Col(html.Div(dcc.Dropdown(
id="site_2",
options=[{
'label': i,
'value': i
} for i in st_options["site"]],
placeholder="második oldal...",
value='')), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True,
),
dcc.Graph(id='graph_4'),
html.Div(id="table4")])
###################################
# PAGE 4 CAHRT CALLBACK
###################################
@app.callback(Output('graph_4','figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value'),
State('site_1', 'value'),
State('site_2', 'value')])
def update_site_comparison(n_clicks, n_submit, switch_value, search_word, st1, st2):
"""
    Queries the API for both selected sites and returns the comparison line chart for
    the search word.
"""
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/count/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{st1}"""
response = requests.get(api_url)
s_1_content = response.json()[1]["data"]
s1_df = pd.DataFrame(s_1_content)
s1_df.set_index("date", inplace=True)
api_url = f"http://pressgraphs.pythonanywhere.com/date/count/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{st2}"""
response = requests.get(api_url)
s_2_content = response.json()[1]["data"]
        s2_df = pd.DataFrame(s_2_content)
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# tz mismatch affecting to tz-aware raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
        expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="UTC")
        tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
        result = date_range(freq='D', start=start, end=end, tz=tz)
import requests
import pandas as pd
import ftplib
import io
import re
import json
import datetime
try:
from requests_html import HTMLSession
except Exception:
print("""Warning - Certain functionality
requires requests_html, which is not installed.
Install using:
pip install requests_html
After installation, you may have to restart your Python session.""")
base_url = "https://query1.finance.yahoo.com/v8/finance/chart/"
def build_url(ticker, start_date = None, end_date = None, interval = "1d"):
if end_date is None:
end_seconds = int(pd.Timestamp("now").timestamp())
else:
end_seconds = int(pd.Timestamp(end_date).timestamp())
if start_date is None:
start_seconds = 7223400
else:
start_seconds = int(pd.Timestamp(start_date).timestamp())
site = base_url + ticker
params = {"period1": start_seconds, "period2": end_seconds,
"interval": interval.lower(), "events": "div,splits"}
return site, params
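# For illustration (build_url itself makes no request; the epoch values depend on the
# dates passed in):
#
#     site, params = build_url("AAPL", "2020-01-01", "2020-02-01")
#     # site   -> "https://query1.finance.yahoo.com/v8/finance/chart/AAPL"
#     # params -> {"period1": <epoch seconds>, "period2": <epoch seconds>,
#     #            "interval": "1d", "events": "div,splits"}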
def force_float(elt):
try:
return float(elt)
except:
return elt
def _convert_to_numeric(s):
if "M" in s:
s = s.strip("M")
return force_float(s) * 1_000_000
if "B" in s:
s = s.strip("B")
return force_float(s) * 1_000_000_000
return force_float(s)
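# For illustration: _convert_to_numeric("305.2M") -> 305200000.0 and
# _convert_to_numeric("1.1B") -> 1100000000.0; values without a suffix are simply
# coerced with force_float.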
def get_data(ticker, start_date = None, end_date = None, index_as_date = True,
interval = "1d"):
'''Downloads historical stock price data into a pandas data frame. Interval
must be "1d", "1wk", "1mo", or "1m" for daily, weekly, monthly, or minute data.
Intraday minute data is limited to 7 days.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
@param: interval = "1d"
'''
if interval not in ("1d", "1wk", "1mo", "1m"):
raise AssertionError("interval must be of of '1d', '1wk', '1mo', or '1m'")
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, interval)
resp = requests.get(site, params = params)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# get open / high / low / close data
frame = pd.DataFrame(data["chart"]["result"][0]["indicators"]["quote"][0])
# get the date info
temp_time = data["chart"]["result"][0]["timestamp"]
if interval != "1m":
# add in adjclose
frame["adjclose"] = data["chart"]["result"][0]["indicators"]["adjclose"][0]["adjclose"]
frame.index = pd.to_datetime(temp_time, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
frame = frame[["open", "high", "low", "close", "adjclose", "volume"]]
else:
frame.index = pd.to_datetime(temp_time, unit = "s")
frame = frame[["open", "high", "low", "close", "volume"]]
frame['ticker'] = ticker.upper()
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
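# Illustrative usage (requires network access to Yahoo Finance, so it is not executed
# here; the rows returned depend on the service):
#
#     aapl = get_data("AAPL", start_date="2020-01-01", end_date="2020-02-01",
#                     interval="1wk")
#     # columns: open, high, low, close, adjclose, volume, ticker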
def tickers_sp500(include_company_data = False):
'''Downloads list of tickers currently listed in the S&P 500 '''
# get list of all S&P 500 stocks
sp500 = pd.read_html("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies")[0]
sp500["Symbol"] = sp500["Symbol"].str.replace(".", "-")
if include_company_data:
return sp500
sp_tickers = sp500.Symbol.tolist()
sp_tickers = sorted(sp_tickers)
return sp_tickers
def tickers_nasdaq(include_company_data = False):
'''Downloads list of tickers currently listed in the NASDAQ'''
ftp = ftplib.FTP("ftp.nasdaqtrader.com")
ftp.login()
ftp.cwd("SymbolDirectory")
r = io.BytesIO()
ftp.retrbinary('RETR nasdaqlisted.txt', r.write)
if include_company_data:
r.seek(0)
data = pd.read_csv(r, sep = "|")
return data
info = r.getvalue().decode()
splits = info.split("|")
tickers = [x for x in splits if "\r\n" in x]
    tickers = [x.split("\r\n")[1] for x in tickers if "NASDAQ" not in x and x != "\r\n"]
tickers = [ticker for ticker in tickers if "File" not in ticker]
ftp.close()
return tickers
def tickers_other(include_company_data = False):
'''Downloads list of tickers currently listed in the "otherlisted.txt"
file on "ftp.nasdaqtrader.com" '''
ftp = ftplib.FTP("ftp.nasdaqtrader.com")
ftp.login()
ftp.cwd("SymbolDirectory")
r = io.BytesIO()
ftp.retrbinary('RETR otherlisted.txt', r.write)
if include_company_data:
r.seek(0)
data = pd.read_csv(r, sep = "|")
return data
info = r.getvalue().decode()
splits = info.split("|")
tickers = [x for x in splits if "\r\n" in x]
tickers = [x.split("\r\n")[1] for x in tickers]
tickers = [ticker for ticker in tickers if "File" not in ticker]
ftp.close()
return tickers
def tickers_dow(include_company_data = False):
'''Downloads list of currently traded tickers on the Dow'''
site = "https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average"
table = pd.read_html(site, attrs = {"id":"constituents"})[0]
if include_company_data:
return table
dow_tickers = sorted(table['Symbol'].tolist())
return dow_tickers
def tickers_ibovespa(include_company_data = False):
'''Downloads list of currently traded tickers on the Ibovespa, Brazil'''
table = pd.read_html("https://pt.wikipedia.org/wiki/Lista_de_companhias_citadas_no_Ibovespa")[0]
table.columns = ["Symbol", "Share", "Sector", "Type", "Site"]
if include_company_data:
return table
ibovespa_tickers = sorted(table.Symbol.tolist())
return ibovespa_tickers
def tickers_nifty50(include_company_data = False):
'''Downloads list of currently traded tickers on the NIFTY 50, India'''
site = "https://finance.yahoo.com/quote/%5ENSEI/components?p=%5ENSEI"
table = pd.read_html(site)[0]
if include_company_data:
return table
nifty50 = sorted(table['Symbol'].tolist())
return nifty50
def tickers_niftybank():
''' Currently traded tickers on the NIFTY BANK, India '''
niftybank = ['AXISBANK', 'KOTAKBANK', 'HDFCBANK', 'SBIN', 'BANKBARODA', 'INDUSINDBK', 'PNB', 'IDFCFIRSTB', 'ICICIBANK', 'RBLBANK', 'FEDERALBNK', 'BANDHANBNK']
return niftybank
def tickers_ftse100(include_company_data = False):
'''Downloads a list of the tickers traded on the FTSE 100 index'''
table = pd.read_html("https://en.wikipedia.org/wiki/FTSE_100_Index", attrs = {"id": "constituents"})[0]
if include_company_data:
return table
return sorted(table.EPIC.tolist())
def tickers_ftse250(include_company_data = False):
'''Downloads a list of the tickers traded on the FTSE 250 index'''
table = pd.read_html("https://en.wikipedia.org/wiki/FTSE_250_Index", attrs = {"id": "constituents"})[0]
table.columns = ["Company", "Ticker"]
if include_company_data:
return table
return sorted(table.Ticker.tolist())
def get_quote_table(ticker , dict_result = True, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}):
'''Scrapes data elements found on Yahoo Finance's quote page
of input ticker
@param: ticker
@param: dict_result = True
'''
site = "https://finance.yahoo.com/quote/" + ticker + "?p=" + ticker
tables = pd.read_html(requests.get(site, headers=headers).text)
    data = pd.concat([tables[0], tables[1]])  # DataFrame.append was removed in pandas 2.0
data.columns = ["attribute" , "value"]
quote_price = pd.DataFrame(["Quote Price", get_live_price(ticker)]).transpose()
quote_price.columns = data.columns.copy()
    data = pd.concat([data, quote_price])
data = data.sort_values("attribute")
data = data.drop_duplicates().reset_index(drop = True)
data["value"] = data.value.map(force_float)
if dict_result:
result = {key : val for key,val in zip(data.attribute , data.value)}
return result
return data
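# Hedged usage sketch (the ticker and any attribute keys other than
# "Quote Price" are illustrative only -- key names come straight from Yahoo's
# summary tables and may change with the page layout):
#     info = get_quote_table("AAPL")                       # dict of attribute -> value
#     price = info["Quote Price"]                          # row added by this function
#     frame = get_quote_table("AAPL", dict_result=False)   # same data as a DataFrame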
def get_stats(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes information from the statistics tab on Yahoo Finance
for an input ticker
@param: ticker
'''
stats_site = "https://finance.yahoo.com/quote/" + ticker + \
"/key-statistics?p=" + ticker
tables = pd.read_html(requests.get(stats_site, headers=headers).text)
tables = [table for table in tables[1:] if table.shape[1] == 2]
table = tables[0]
for elt in tables[1:]:
        table = pd.concat([table, elt])
table.columns = ["Attribute" , "Value"]
table = table.reset_index(drop = True)
return table
def get_stats_valuation(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes Valuation Measures table from the statistics tab on Yahoo Finance
for an input ticker
@param: ticker
'''
stats_site = "https://finance.yahoo.com/quote/" + ticker + \
"/key-statistics?p=" + ticker
tables = pd.read_html(requests.get(stats_site, headers=headers).text)
tables = [table for table in tables if "Trailing P/E" in table.iloc[:,0].tolist()]
table = tables[0].reset_index(drop = True)
return table
def _parse_json(url, headers = {'User-agent': 'Mozilla/5.0'}):
html = requests.get(url=url, headers = headers).text
json_str = html.split('root.App.main =')[1].split(
'(this)')[0].split(';\n}')[0].strip()
try:
data = json.loads(json_str)[
'context']['dispatcher']['stores']['QuoteSummaryStore']
except:
return '{}'
else:
# return data
new_data = json.dumps(data).replace('{}', 'null')
new_data = re.sub(r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
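        # Note: Yahoo serialises most numeric fields as {"raw": 24.5, "fmt": "24.50", ...};
        # the substitution above keeps only the raw value, e.g.
        # '{"raw":24.5,"fmt":"24.50"}' -> '24.5', so the decoded json_info holds
        # plain numbers rather than nested raw/fmt dicts.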
json_info = json.loads(new_data)
return json_info
def _parse_table(json_info):
df = pd.DataFrame(json_info)
if df.empty:
return df
del df["maxAge"]
df.set_index("endDate", inplace=True)
df.index = pd.to_datetime(df.index, unit="s")
df = df.transpose()
df.index.name = "Breakdown"
return df
def get_income_statement(ticker, yearly = True):
'''Scrape income statement from Yahoo Finance for a given ticker
@param: ticker
'''
income_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(income_site)
if yearly:
temp = json_info["incomeStatementHistory"]["incomeStatementHistory"]
else:
temp = json_info["incomeStatementHistoryQuarterly"]["incomeStatementHistory"]
return _parse_table(temp)
def get_balance_sheet(ticker, yearly = True):
'''Scrapes balance sheet from Yahoo Finance for an input ticker
@param: ticker
'''
balance_sheet_site = "https://finance.yahoo.com/quote/" + ticker + \
"/balance-sheet?p=" + ticker
json_info = _parse_json(balance_sheet_site)
try:
if yearly:
temp = json_info["balanceSheetHistory"]["balanceSheetStatements"]
else:
temp = json_info["balanceSheetHistoryQuarterly"]["balanceSheetStatements"]
except:
temp = []
return _parse_table(temp)
def get_cash_flow(ticker, yearly = True):
'''Scrapes the cash flow statement from Yahoo Finance for an input ticker
@param: ticker
'''
cash_flow_site = "https://finance.yahoo.com/quote/" + \
ticker + "/cash-flow?p=" + ticker
json_info = _parse_json(cash_flow_site)
if yearly:
temp = json_info["cashflowStatementHistory"]["cashflowStatements"]
else:
temp = json_info["cashflowStatementHistoryQuarterly"]["cashflowStatements"]
return _parse_table(temp)
def get_financials(ticker, yearly = True, quarterly = True):
'''Scrapes financials data from Yahoo Finance for an input ticker, including
balance sheet, cash flow statement, and income statement. Returns dictionary
of results.
@param: ticker
@param: yearly = True
@param: quarterly = True
'''
if not yearly and not quarterly:
raise AssertionError("yearly or quarterly must be True")
financials_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(financials_site)
result = {}
if yearly:
temp = json_info["incomeStatementHistory"]["incomeStatementHistory"]
table = _parse_table(temp)
result["yearly_income_statement"] = table
temp = json_info["balanceSheetHistory"]["balanceSheetStatements"]
table = _parse_table(temp)
result["yearly_balance_sheet"] = table
temp = json_info["cashflowStatementHistory"]["cashflowStatements"]
table = _parse_table(temp)
result["yearly_cash_flow"] = table
if quarterly:
temp = json_info["incomeStatementHistoryQuarterly"]["incomeStatementHistory"]
table = _parse_table(temp)
result["quarterly_income_statement"] = table
temp = json_info["balanceSheetHistoryQuarterly"]["balanceSheetStatements"]
table = _parse_table(temp)
result["quarterly_balance_sheet"] = table
temp = json_info["cashflowStatementHistoryQuarterly"]["cashflowStatements"]
table = _parse_table(temp)
result["quarterly_cash_flow"] = table
return result
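# Hedged usage sketch ("AAPL" is only an illustrative ticker); the dict keys are
# exactly those assembled above:
#     fin = get_financials("AAPL", yearly=True, quarterly=False)
#     fin["yearly_income_statement"]   # DataFrame with one column per fiscal year end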
def get_holders(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes the Holders page from Yahoo Finance for an input ticker
@param: ticker
'''
holders_site = "https://finance.yahoo.com/quote/" + \
ticker + "/holders?p=" + ticker
tables = pd.read_html(requests.get(holders_site, headers=headers).text)
table_names = ["Major Holders" , "Direct Holders (Forms 3 and 4)" ,
"Top Institutional Holders" , "Top Mutual Fund Holders"]
table_mapper = {key : val for key,val in zip(table_names , tables)}
return table_mapper
def get_analysts_info(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes the Analysts page from Yahoo Finance for an input ticker
@param: ticker
'''
analysts_site = "https://finance.yahoo.com/quote/" + ticker + \
"/analysts?p=" + ticker
tables = pd.read_html(requests.get(analysts_site, headers=headers).text)
table_names = [table.columns[0] for table in tables]
table_mapper = {key : val for key , val in zip(table_names , tables)}
return table_mapper
def get_live_price(ticker):
'''Gets the live price of input ticker
@param: ticker
'''
df = get_data(ticker, end_date = pd.Timestamp.today() + pd.DateOffset(10))
return df.close[-1]
def _raw_get_daily_info(site):
session = HTMLSession()
resp = session.get(site)
tables = pd.read_html(resp.html.raw_html)
df = tables[0].copy()
df.columns = tables[0].columns
del df["52 Week Range"]
df["% Change"] = df["% Change"].map(lambda x: float(x.strip("%+").replace(",", "")))
fields_to_change = [x for x in df.columns.tolist() if "Vol" in x \
or x == "Market Cap"]
for field in fields_to_change:
if type(df[field][0]) == str:
df[field] = df[field].map(_convert_to_numeric)
session.close()
return df
def get_day_most_active(count: int = 100):
return _raw_get_daily_info(f"https://finance.yahoo.com/most-active?offset=0&count={count}")
def get_day_gainers(count: int = 100):
return _raw_get_daily_info(f"https://finance.yahoo.com/gainers?offset=0&count={count}")
def get_day_losers(count: int = 100):
return _raw_get_daily_info(f"https://finance.yahoo.com/losers?offset=0&count={count}")
def get_top_crypto():
'''Gets the top 100 Cryptocurrencies by Market Cap'''
session = HTMLSession()
resp = session.get("https://finance.yahoo.com/cryptocurrencies?offset=0&count=100")
tables = pd.read_html(resp.html.raw_html)
df = tables[0].copy()
df["% Change"] = df["% Change"].map(lambda x: float(str(x).strip("%").\
strip("+").\
replace(",", "")))
del df["52 Week Range"]
del df["1 Day Chart"]
fields_to_change = [x for x in df.columns.tolist() if "Volume" in x \
or x == "Market Cap" or x == "Circulating Supply"]
for field in fields_to_change:
if type(df[field][0]) == str:
df[field] = df[field].map(lambda x: _convert_to_numeric(str(x)))
session.close()
return df
def get_dividends(ticker, start_date = None, end_date = None, index_as_date = True):
'''Downloads historical dividend data into a pandas data frame.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
'''
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, "1d")
resp = requests.get(site, params = params)
if not resp.ok:
return pd.DataFrame()
# get JSON response
data = resp.json()
# check if there is data available for dividends
if "events" not in data["chart"]["result"][0] or "dividends" not in data["chart"]["result"][0]['events']:
return pd.DataFrame()
# get the dividend data
frame = pd.DataFrame(data["chart"]["result"][0]['events']['dividends'])
frame = frame.transpose()
frame.index = pd.to_datetime(frame.index, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
# sort in chronological order
frame = frame.sort_index()
frame['ticker'] = ticker.upper()
# remove old date column
frame = frame.drop(columns='date')
frame = frame.rename({'amount': 'dividend'}, axis = 'columns')
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
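# Hedged usage sketch ("MSFT" is only an illustrative ticker):
#     divs = get_dividends("MSFT")
#     # -> DataFrame indexed by the dividend dates reported by Yahoo, with
#     #    'dividend' and 'ticker' columns (or a 'date' column when
#     #    index_as_date=False).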
def get_splits(ticker, start_date = None, end_date = None, index_as_date = True):
'''Downloads historical stock split data into a pandas data frame.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
'''
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, "1d")
resp = requests.get(site, params = params)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# check if there is data available for splits
if "splits" not in data["chart"]["result"][0]['events']:
raise AssertionError("There is no data available on stock splits, or none have occured")
# get the split data
frame = pd.DataFrame(data["chart"]["result"][0]['events']['splits'])
frame = frame.transpose()
frame.index = pd.to_datetime(frame.index, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
    # sort into chronological order
frame = frame.sort_index()
frame['ticker'] = ticker.upper()
# remove unnecessary columns
frame = frame.drop(columns=['date', 'denominator', 'numerator'])
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
def get_earnings(ticker):
'''Scrapes earnings data from Yahoo Finance for an input ticker
@param: ticker
'''
result = {
"quarterly_results": pd.DataFrame(),
"yearly_revenue_earnings": pd.DataFrame(),
"quarterly_revenue_earnings": pd.DataFrame()
}
financials_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(financials_site)
if "earnings" not in json_info:
return result
temp = json_info["earnings"]
    if temp is None:
return result
result["quarterly_results"] = pd.DataFrame.from_dict(temp["earningsChart"]["quarterly"])
result["yearly_revenue_earnings"] = pd.DataFrame.from_dict(temp["financialsChart"]["yearly"])
result["quarterly_revenue_earnings"] = pd.DataFrame.from_dict(temp["financialsChart"]["quarterly"])
return result
### Earnings functions
def _parse_earnings_json(url):
resp = requests.get(url)
content = resp.content.decode(encoding='utf-8', errors='strict')
page_data = [row for row in content.split(
'\n') if row.startswith('root.App.main = ')][0][:-1]
page_data = page_data.split('root.App.main = ', 1)[1]
return json.loads(page_data)
def get_next_earnings_date(ticker):
base_earnings_url = 'https://finance.yahoo.com/quote'
new_url = base_earnings_url + "/" + ticker
parsed_result = _parse_earnings_json(new_url)
temp = parsed_result['context']['dispatcher']['stores']['QuoteSummaryStore']['calendarEvents']['earnings']['earningsDate'][0]['raw']
return datetime.datetime.fromtimestamp(temp)
def get_earnings_history(ticker):
'''Inputs: @ticker
Returns the earnings calendar history of the input ticker with
EPS actual vs. expected data.'''
url = 'https://finance.yahoo.com/calendar/earnings?symbol=' + ticker
result = _parse_earnings_json(url)
return result["context"]["dispatcher"]["stores"]["ScreenerResultsStore"]["results"]["rows"]
def get_earnings_for_date(date, offset = 0, count = 1):
'''Inputs: @date
Returns a dictionary of stock tickers with earnings expected on the
input date. The dictionary contains the expected EPS values for each
stock if available.'''
base_earnings_url = 'https://finance.yahoo.com/calendar/earnings'
if offset >= count:
return []
temp = pd.Timestamp(date)
date = temp.strftime("%Y-%m-%d")
dated_url = '{0}?day={1}&offset={2}&size={3}'.format(
base_earnings_url, date, offset, 100)
result = _parse_earnings_json(dated_url)
stores = result['context']['dispatcher']['stores']
earnings_count = stores['ScreenerCriteriaStore']['meta']['total']
new_offset = offset + 100
more_earnings = get_earnings_for_date(date, new_offset, earnings_count)
current_earnings = stores['ScreenerResultsStore']['results']['rows']
total_earnings = current_earnings + more_earnings
return total_earnings
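# Note on get_earnings_for_date: Yahoo's screener is paged 100 rows at a time,
# so the function fetches one page, recurses with offset + 100 until the offset
# reaches the total row count reported by the screener, and concatenates the
# pages into a single list of row dicts.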
def get_earnings_in_date_range(start_date, end_date):
'''Inputs: @start_date
@end_date
Returns the stock tickers with expected EPS data for all dates in the
    input range (inclusive of the start_date and end_date).'''
earnings_data = []
days_diff = pd.Timestamp(end_date) - pd.Timestamp(start_date)
days_diff = days_diff.days
current_date = | pd.Timestamp(start_date) | pandas.Timestamp |
import fire
from rest_api_asyncio import UniprotClient, get_db
import pandas as pd
from pandas import DataFrame
import gtfparse
import time
from tqdm import tqdm
from pathlib import Path
from functools import reduce
import glob
import sys
import urllib3
import asyncio
OUT_HEADER_BASE = [
'gene_id',
'gene_names',
'uniprot_id',
'protein_names',
'protein_existence']
OUT_HEADER_REF = [
'go_id',
'go_term',
'interpro_ids',
'interpro_names',
'pfam_ids',
'pfam_names',
'feature_names',
'feature_dbs',
'pubmed_ids'
]
DEFAULT_CONCUR_REQ = 10
DEFAULT_RETRY_TIMES = 2
def format_df(gene_df, sep=',', empty_rep=None, by='gene_id'):
def my_unique(x, sep=','):
unique_x = pd.unique(x.dropna())
if str(unique_x.dtype) == 'float64':
unique_x = unique_x.astype('int')
unique_x = [str(each) for each in unique_x]
if not unique_x:
if empty_rep is None:
return None
else:
unique_x = [empty_rep]
return sep.join(unique_x)
gene_df = gene_df.groupby(by).agg(my_unique, sep)
return gene_df.reset_index()
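# Worked example for format_df (illustrative values): rows
# (gene_id='g1', go_id='GO:1'), (gene_id='g1', go_id='GO:2'), (gene_id='g1', go_id='GO:1')
# collapse to a single 'g1' row with go_id == 'GO:1,GO:2' -- duplicates and NaN
# are dropped, and columns that end up empty are filled with `empty_rep` if given.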
def save_download(df, middle_file):
middle_file = Path(middle_file)
if middle_file.exists():
write_mode = 'a'
write_header = False
else:
write_header = True
write_mode = 'w'
df.to_csv(middle_file, header=write_header,
mode=write_mode, index=False,
sep='\t')
def save_download_to_dir(df, middle_file_dir, file_id):
middle_file_dir = Path(middle_file_dir)
middle_file_dir.mkdir(parents=True, exist_ok=True)
middle_file = middle_file_dir / f'{file_id}.txt'
df.to_csv(middle_file, index=False, sep='\t')
def idlist2df(id_list, col_name, identity_map=None):
if id_list:
list_df = DataFrame(id_list, columns=[col_name])
if identity_map is not None:
for key, val in identity_map.items():
list_df.loc[:, key] = val
return list_df
def refdb_anno(anno_db, db_name):
db_ids = [each['id'] for each in anno_db
if each['type'] == db_name]
db_names = [each['properties']['entry name'] for each in anno_db
if each['type'] == db_name]
return db_ids, db_names
def featuredb_anno(featuredb):
no_use_ft = ('', 'DSL')
feature_db_dict = dict()
for each in featuredb:
if 'description' not in each:
continue
elif each['description'] in no_use_ft:
continue
elif 'evidences' not in each:
continue
elif 'source' not in each['evidences'][0]:
continue
else:
feature_db_name = each['evidences'][0]['source']['name']
feature_db_id = each['evidences'][0]['source']['id']
if feature_db_name == 'Pfam':
continue
feature_db_dict.setdefault(
'feature_names', []).append(each['description'])
feature_db_f_name = f'{feature_db_name}:{feature_db_id}'
feature_db_dict.setdefault(
'feature_dbs', []
).append(feature_db_f_name)
return feature_db_dict
def commentdb_anno(commentdb):
comment_db_dict = dict()
for each in commentdb:
each_type = f'uniprot_comments({each["type"]})'
if 'text' not in each:
continue
for each_type_cm in each['text']:
if 'value' in each_type_cm:
comment_db_dict.setdefault(each_type, []).append(
each_type_cm['value']
)
return comment_db_dict
def extract_anno_inf(decoded, uniprot_id, middle_file):
anno_dfs = list()
identity_map = {'uniprot_id': uniprot_id}
if 'gene' in decoded:
gene_db = decoded['gene']
gene_names = [each['name']['value'] for each in gene_db
if 'name' in each]
anno_dfs.append(idlist2df(gene_names, 'gene_names',
identity_map=identity_map))
if 'protein' in decoded:
protein_db = decoded['protein']
products = None
if 'submittedName' in protein_db:
products = [each['fullName']['value'] for
each in protein_db['submittedName']
if 'value' in each['fullName']]
elif 'recommendedName' in protein_db:
products = [protein_db['recommendedName']['fullName']['value']]
if products is not None:
anno_dfs.append(idlist2df(products, 'protein_names',
identity_map=identity_map))
if 'proteinExistence' in decoded:
anno_dfs.append(idlist2df([decoded['proteinExistence']],
'protein_existence',
identity_map=identity_map))
if 'comments' in decoded:
comment_db = decoded['comments']
comment_db_dict = commentdb_anno(comment_db)
if comment_db_dict:
for key, val in comment_db_dict.items():
anno_dfs.append(idlist2df(val, key,
identity_map=identity_map))
if 'dbReferences' in decoded:
anno_db = decoded['dbReferences']
interpro_ids, interpro_names = refdb_anno(anno_db, 'InterPro')
if interpro_ids:
anno_dfs.append(idlist2df(interpro_ids, 'interpro_ids',
identity_map=identity_map))
anno_dfs.append(idlist2df(interpro_names, 'interpro_names',
identity_map=identity_map))
pfam_ids, pfam_names = refdb_anno(anno_db, 'Pfam')
if pfam_ids:
anno_dfs.append(idlist2df(pfam_ids, 'pfam_ids',
identity_map=identity_map))
anno_dfs.append(idlist2df(pfam_names, 'pfam_names',
identity_map=identity_map))
if 'features' in decoded:
feature_db = decoded['features']
feature_db_dict = featuredb_anno(feature_db)
if feature_db_dict:
for key, val in feature_db_dict.items():
anno_dfs.append(idlist2df(val, key,
identity_map=identity_map))
if 'references' in decoded:
citation_db = decoded['references']
pubmed_ids = [each['citation']['dbReferences'][0]['id']
for each in citation_db
if 'dbReferences' in each['citation']]
if pubmed_ids:
anno_dfs.append(idlist2df(pubmed_ids, 'pubmed_ids',
identity_map=identity_map))
if anno_dfs:
anno_dfs = [each for each in anno_dfs
if each is not None]
anno_df = reduce(pd.merge, anno_dfs)
else:
anno_df = | DataFrame([None], columns=['protein_existence']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
Date: Oct 2019
"""
import numpy as np
import pandas as pd
from .pipe import Pipe
from .. import precomp_funs as _pf
class CHPPlant(Pipe):
"""
Construct a CHP plant.
Can be added to the simulation environment by using the following method:
.add_part(Pipe, name, volume=..., grid_points=..., outer_diameter=...,
shell_thickness=...)
Part creation parameters
------------------------
Pipe : class name
This parameter can\'t be changed and needs to be passed exactly like
this.
name : string
Pipe identifier as string. Needs to be unique.
length : integer, float
        Pipe length in [m].
grid_points : integer
Number of grid points to discretize the pipe with.
For part initialization the following additional parameters need to be
passed:
.init_part(insulation_thickness=..., insulation_lambda=...)
Part initialization parameters
------------------------------
insulation_thickness : float
in [m]
insulation_lambda : float
in [W/(m*K)]
"""
def __init__(
self,
name,
master_cls,
eta_el=0.32733,
p2h_ratio=0.516796,
max_slope_el=0.05,
min_off_time=600,
min_on_time=600,
chp_on_at_start=False,
**kwds
):
self.constr_type = 'CHPPlant' # define construction type
# since this part is a subclass of Pipe, initialize Pipe:
super().__init__(
name, master_cls, **kwds, constr_type=self.constr_type
)
# check for default args, print messages and set them to kwds
defaults = (
('p2h_ratio', 0.516796),
('eta_el', 0.32733),
('max_slope_el', 0.05),
('min_off_time', 600),
('min_on_time', 600),
)
base_dflt_str = (
' ---> CHP plant {0}: No `{1}` given. '
'Using default value of `{2:.3f}`.'
)
if not self._models.suppress_printing:
print(
'Setting defaults. Set a value to `None` to get more '
'information on effect, type and range.'
)
for dflt_nm, dflt_val in defaults:
if dflt_val == locals()[dflt_nm]:
if dflt_val is not None: # only if not None, else print err
if not master_cls.suppress_printing:
print(
base_dflt_str.format(self.name, dflt_nm, dflt_val)
)
kwds[dflt_nm] = dflt_val
else:
kwds[dflt_nm] = locals()[dflt_nm]
# define arguments and errors:
self._aae = { # arguments and errors
'power_electrical': (
'CHP plant electrical power output in [W]. Type: int, float. '
'Range: X > 0'
),
'p2h_ratio': (
'CHP plant power to heat ratio. Type: float. '
'Range: 0 < X < 1'
),
'eta_el': (
'CHP plant electrical efficiency. Type: float. '
'Range: 0 < X < 1'
),
'modulation_range': (
'CHP plant electrical modulation range. Type: list, tuple.'
),
'max_slope_el': (
'CHP plant maximum electrical ramp in %/s. Type: float. '
'Range: 0 < X <= 1'
),
'min_off_time': (
'CHP plant minimum consecutive off time in [s] before '
'switching on is allowed. Type: int, float. Range: X >= 0.'
),
'min_on_time': (
'CHP plant minimum consecutive on time in [s] before '
'switching off is allowed. Plant will be kept at min. '
'modulation if shutdown is requested until min. on time is '
'reached. Emergency shutdown due to overtemperature will '
'override this timer. Type: int, float. Range: X >= 0.'
),
'heat_spread': (
'CHP plant heat spread, that is if a \'single\' cell, a '
'cell \'range\' or \'all\' cells of the CHP plant are heated '
'by the thermal power output. Type: str.'
),
'lower_limit': (
'Lower limit for controller action on the modulation. '
'Type: int, float. Range: X >= 0.'
),
'upper_limit': (
'Upper limit for controller action on the modulation. '
'Type: int, float. Range: X <= 1.'
),
}
# check for arguments:
self._print_arg_errs(self.constr_type, name, self._aae, kwds)
assert isinstance(kwds['power_electrical'], (int, float)) and (
kwds['power_electrical'] > 0.0
), (
self._base_err
+ self._arg_err.format('power_electrical')
+ self._aae['power_electrical']
)
self.chp_power_el = kwds['power_electrical']
assert isinstance(kwds['p2h_ratio'], float) and (
0.0 < kwds['p2h_ratio'] < 1.0
), (
self._base_err
+ self._arg_err.format('p2h_ratio')
+ self._aae['p2h_ratio']
)
self.p2h_ratio = kwds['p2h_ratio']
# base p2h-ratio of the CHP plant is about .5, the exact ratio is
# given by:
self._p2h_base = 1 / _pf.chp_thermal_power(1.0)
# since this is already included in the polynomials, the given p2h
# and the base p2h must be combined into an additional factor:
self._p2h_ratio = self._p2h_base / self.p2h_ratio
# electrical efficiency:
assert isinstance(kwds['eta_el'], float) and (
0.0 < kwds['eta_el'] < 1.0
), (
self._base_err
+ self._arg_err.format('eta_el')
+ self._aae['eta_el']
)
self.eta_el = kwds['eta_el']
# to get the correct gas consumption given by polynomials in dependency
# of Pel, an additional conversion factor must be calculated. The base
# eta el used in the polynomials is
self._eta_el_base = 1 / _pf.chp_gas_power(1.0)
self._eta_el_fac = self._eta_el_base / self.eta_el
# get thermal efficiency from eta el
self.eta_th = self.eta_el / self.p2h_ratio
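        # Worked example with the default arguments (rounded): eta_el = 0.32733
        # and p2h_ratio = 0.516796 give eta_th ~= 0.6334, so eta_el + eta_th
        # ~= 0.9607, which satisfies the default eta_sum limit of 0.961 checked
        # just below.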
if 'eta_sum' not in kwds:
eta_sum = 0.961
assert (self.eta_el + self.eta_th) <= eta_sum, (
self._base_err
+ self._arg_err.format('eta_el + eta_th')
+ 'The current total CHP plant efficiency is {0:.4f}, but must '
'be <={1:.3f}. If a higher total efficiency '
'shall be set, adjust the `eta_sum` parameter.'.format(
self.eta_el + self.eta_th, eta_sum
)
)
else:
assert (
isinstance(kwds['eta_sum'], (int, float))
and kwds['eta_sum'] > 0
)
assert (self.eta_el + self.eta_th) < kwds['eta_sum'], (
self._base_err
+ self._arg_err.format('eta_el + eta_th')
+ 'The total CHP plant efficiency must be lower than `eta_sum`.'
)
# save thermal and gas power of chp plant at max power output
self.chp_power_th = self.chp_power_el / self.p2h_ratio
self.chp_power_gas = self.chp_power_el / self.eta_el
# get modulation range:
err_mod = (
'The valid CHP plant modulation range has to be given with '
'`modulation_range=(lower, upper)` where lower and upper '
'represent the lower and upper modulation bounds, f.i. '
'`(.5, 1)` if the CHP modulation can be set between 50% and '
'100%. Values must be in the range of 0 < x <= 1.'
)
assert isinstance(kwds['modulation_range'], (tuple, list)), err_mod
self._mod_range = kwds['modulation_range']
self._mod_lower = float(self._mod_range[0])
self._mod_upper = float(self._mod_range[1])
assert isinstance(self._mod_lower, (int, float)) and (
0 < self._mod_lower <= 1
), err_mod
assert (
isinstance(self._mod_upper, (int, float))
and (0 < self._mod_upper <= 1)
and (self._mod_lower < self._mod_upper)
), err_mod
assert isinstance(kwds['max_slope_el'], float) and (
0 < kwds['max_slope_el'] <= 1
), (
self._base_err
+ self._arg_err.format('max_slope_el')
+ self._aae['max_slope_el']
)
self._max_ramp_el = kwds['max_slope_el']
self._max_ramp_th = 0.025 # 2.5%/s, NOT USED
assert isinstance(kwds['min_off_time'], (int, float)) and (
0 < kwds['min_off_time']
), (
self._base_err
+ self._arg_err.format('min_off_time')
+ self._aae['min_off_time']
)
self._min_off_time = kwds['min_off_time']
assert isinstance(kwds['min_on_time'], (int, float)) and (
0 < kwds['min_on_time']
), (
self._base_err
+ self._arg_err.format('min_on_time')
+ self._aae['min_on_time']
)
self._min_on_time = kwds['min_on_time']
self._T_chp_in_max = 75.0
self._T_chp_in_max_emrgncy = 110.0 # immediate shutdown if >temp.
# single cell array for heat flow rate:
self._dQ_heating = np.zeros(1, dtype=np.float64)
# result array for heating:
self.res_dQ = np.zeros((1, 1), dtype=np.float64)
# same for electric power:
self._Pel = np.zeros(1, dtype=np.float64)
self.res_Pel = np.zeros(1, dtype=np.float64)
# and gas consumption
self._Pgas = np.zeros(1, dtype=np.float64)
self.res_Pgas = np.zeros(1, dtype=np.float64)
# checker if chp plant is on, in startup, value for time passed since
# startup, value for time passed since shutoff,
# time in [s] to account for remaining heat after a recent shutdown,
# array for current modulation of CHP plant
self._chp_on = False
        self._chp_state = np.zeros(1, dtype=bool)  # vector to save states
self._shutdown_in_progress = False
self._startup_duration = 0.0
self._off_duration = 0.0
self._startup_at_time = 0
# has never been switched on. min off time -1 to enable switching on
# at the start of the sim.
self._shutdown_at_time = -self._min_off_time - 1.0
self._remaining_heat = 0.0
self._power_modulation = np.zeros(1, dtype=np.float64)
# max. overtemperature time. dt is the consecutive time since first
# overtemp., max_temp_exc_time the maximum time allowed before shutdown
self._dt_time_temp_exc = 0.0
        # TODO: make this settable via kwds as well:
self._max_temp_exc_time = 2 * 60
# startup and shutdown factors for thermal and el (only startup, since
# only heat can remain...) power
if not chp_on_at_start: # if CHP was switched off at the beginning
self._startup_factor_el = 0.0
self._startup_factor_th = 0.0
self._startup_factor_gas = 0.0
self._chp_on = False # chp is NOT on
self._startup_in_progress = True # chp was off, thus startup req.
self._shutdown_duration = 99999.0
self._bin_pow_fac = 0
else:
self._startup_factor_el = 1.0
self._startup_factor_th = 1.0
self._startup_factor_gas = 1.0
            # treat the plant as having been switched on `min_on_time` seconds
            # ago, so that switching off is allowed right at the start of the sim.
self._startup_at_time = -self._min_on_time # has been switched on
# has never been switched off
self._shutdown_at_time = self._startup_at_time - 1
self._chp_on = True # chp IS on
# chp was ON, thus NO startup required!
self._startup_in_progress = False
self._shutdown_duration = 99999.0
self._bin_pow_fac = 1
self._shutdown_factor_th = 0.0
# save last thermal power value before shutdown for shutdown process
self._last_dQ = np.zeros_like(self._dQ_heating)
# percentage values, at which the CHP plant is considered as on/off
self._chp_on_perc = 0.999
self._chp_off_perc = 0.01
# power factors for on/off switching for Pth (on and off) and Pel
# (only on):
self._pth_power_factor = 0.0
self._pel_power_factor = 0.0
# differential of temperature due to heating:
self.dT_heat = np.zeros_like(self.T)
# memoryview to the inlet cell for max. temperature checking
self._T_chp_in = self.T[0:1]
# if the part CAN BE controlled by the control algorithm:
self.is_actuator = True
self._actuator_CV = self._power_modulation[:] # set to be controlled
self._actuator_CV_name = 'el_power_modulation'
self._unit = '[%]' # set unit of control variable
# if the part HAS TO BE controlled by the control algorithm:
self.control_req = True
# if the part needs a special control algorithm (for parts with 2 or
# more controllable inlets/outlets/...):
self.actuator_special = False
# initialize bool if control specified:
self.ctrl_defined = False
# get heat spread
err_str = (
self._base_err
+ self._arg_err.format('heat_spread')
+ 'The heat spread has to be given with `heat_spread=X`, where '
'X is one of the following:\n'
' - \'all\': The rate of heat flow is spread equally on all '
'cells.\n'
' - \'single\': One single cell is heated with the total rate '
'of heat flow. This may cause the simulation stepsize to decrease '
'to very small stepsizes if the thermal intertia is low compared '
'to the rate of heat flow.\n'
' - \'range\': The rate of heat flow is spread equally on a '
'range of consecutive cells.'
)
assert kwds['heat_spread'] in ['all', 'single', 'range'], err_str
self._heat_spread = kwds['heat_spread']
# get other parameters depending on heated cells:
if self._heat_spread == 'all': # all cells heated equally
# cell wise rate of heat flow multiplicator:
self._heat_mult = 1 / self.num_gp
# set slice to full array:
self._heated_cells = slice(0, self.num_gp)
self._num_heated_cells = self.num_gp
elif self._heat_spread == 'single': # single cell heated
# get slice index to heated cells:
err_str = (
self._base_err
+ self._arg_err.format('heated_cells')
+ 'The heated cells have to be given with `heated_cells=X`. '
'Since `heat_spread=\'single\'` is set, X has to be an '
'integer index to the target cell for the heat flow in the '
'range of `0 <= X <= ' + str(self.num_gp - 1) + '`.'
)
assert (
'heated_cells' in kwds
and type(kwds['heated_cells']) == int
and 0 <= kwds['heated_cells'] <= (self.num_gp - 1)
), err_str
self._heated_cells = slice(
kwds['heated_cells'], kwds['heated_cells'] + 1
)
self._num_heated_cells = 1
# cell wise rate of heat flow multiplicator:
self._heat_mult = 1.0
else: # range of cells cells heated equally
# get slice index to heated cells:
err_str = (
self._base_err
+ self._arg_err.format('heated_cells')
+ 'The heated cells have to be given with `heated_cells=X`. '
'Since `heat_spread=\'range\'` is set, X has to be a range of '
'target cells for the heat flow, given as a list with '
'`X=[start, end]` where both values are integer values. '
'Additionally `start < end` and `0 <= start/end <= '
+ str(self.num_gp - 1)
+ '` must be true.\n'
'As always with indexing in python/numpy, the end-index is '
'NOT included in the selection. Thus `X=[2, 4]` will heat '
'cells 2 and 3, but NOT cell 4.'
)
assert (
'heated_cells' in kwds
and isinstance(kwds['heated_cells'], (list, tuple))
and len(kwds['heated_cells']) == 2
and isinstance(kwds['heated_cells'][0], int)
and isinstance(kwds['heated_cells'][1], int)
), err_str
start = kwds['heated_cells'][0]
end = kwds['heated_cells'][1]
# assert correct indices:
assert start < end and start >= 0 and end < self.num_gp, err_str
self._heated_cells = slice(start, end)
self._num_heated_cells = end - start
# cell wise rate of heat flow multiplicator
self._heat_mult = 1 / self._num_heated_cells
# create view to dT for easy access to heated cells:
self._dT_heated = self.dT_heat[self._heated_cells]
# view to heated cell's m*cp value:
self._mcp_heated = self._mcp[self._heated_cells]
err_str = (
self._base_err
+ self._arg_err.format('lower_limit, upper_limit')
+ 'The part was set to be an actuator and need a control with '
'`no_control=False`, thus `lower_limit` and `upper_limit` '
'in {0} have to be passed to clip the controller action on '
'the actuator to the limits.\n'
'The limits have to be given as integer or float values with '
'`lower_limit < upper_limit`.'
).format(self._unit)
self._lims = np.array( # set limits to array
[kwds['lower_limit'], kwds['upper_limit']], dtype=np.float64
)
self._llim = self._lims[0] # also save to single floats
self._ulim = self._lims[1] # also save to single floats
assert 0 <= self._lims[0] < self._lims[1], (
err_str + ' For HeatedPipe limits are additionally restricted '
'to `0 <= lower_limit < upper_limit`.'
)
# precalc arrays needed for shutdown-startup-procedure:
self._tsteps_startup = np.arange(100000) # check for up to 1e5 s
# thermal power factor during startup
self._startuptsteps = _pf.chp_startup_th(self._tsteps_startup)
# find step at which full power is reached. idx is also in seconds!
idx_on = np.argmin(np.abs(self._startuptsteps - self._chp_on_perc))
# cut arrays to that size:
self._tsteps_startup = self._tsteps_startup[1 : idx_on + 1]
self._startuptsteps = self._startuptsteps[1 : idx_on + 1]
# IMPORTANT: THIS VARIABLE **MUST NOT BE INHERITED BY SUB-CLASSES**!!
# If sub-classes are inherited from this part, this bool checker AND
# the following variables MUST BE OVERWRITTEN!
# ist the diff function fully njitted AND are all input-variables
# stored in a container?
self._diff_fully_njit = False
# self._diff_njit = pipe1D_diff # handle to njitted diff function
# input args are created in simenv _create_diff_inputs method
def init_part(self, start_modulation=0, fluegas_flow=70 / 61.1, **kwds):
"""Initialize part. Do stuff which requires built part dict."""
# since this part is a subclass of Pipe, call init_part of Pipe:
super().init_part(**kwds)
# check for default args, print messages and set them to kwds
if fluegas_flow == 70 / 61.1:
if not self._models.suppress_printing:
print(
' ---> CHP plant {0}: No `{1}` given. '
'Using default value of `{2:.3f}Nm3/kWh`.'.format(
self.name, 'fluegas_flow', fluegas_flow
)
)
kwds['fluegas_flow'] = fluegas_flow
# define arguments and errors:
self._aaei = { # arguments and errors
'connect_fluegas_hex': (
'Connect a flue gas heat exchanger directly to the CHP plant? '
'Exhaust gas flow will be passed to the HEX directly, wihout '
'need for pipes and pumps. Type: bool.'
),
# 'fluegas_flow_at_pmax': (
# 'Flue gas flow at maximum power output (100% modulation) in '
# '[Nm3/h]. Type: int, float. Range: X > 0')
'fluegas_flow': (
'Specific flue gas flow relative to the gas consumption in '
'[Nm3/kWh], f.i. 1.146 for 70Nm3 per 61.1kW gas consumption '
'(lower heating vlaue). Type: int, float. Range: X > 0'
),
}
# check for arguments:
self._print_arg_errs(self.constr_type, self.name, self._aaei, kwds)
# check modulation parameters
assert isinstance(start_modulation, (int, float)) and (
0 <= start_modulation <= 1
        ), 'Start modulation must be between 0 and 1.'
self._actuator_CV[:] = start_modulation
# initialize the actuator
self._initialize_actuator(variable_name='_power_modulation', **kwds)
# if CHP is feeding a fluegas HEX:
assert isinstance(kwds['connect_fluegas_hex'], bool), (
self._base_err
+ self._arg_err.format('connect_fluegas_hex')
+ self._aaei['connect_fluegas_hex']
)
self._connect_fg_hex = kwds['connect_fluegas_hex']
if self._connect_fg_hex:
err_hex2 = (
self._base_err
+ self._arg_err.format('fg_hex_name')
+ 'Part name of the fluegas heat exchanger to connect.'
)
assert 'fg_hex_name' in kwds, err_hex2
err_hex3 = self._base_err + self._arg_err.format(
'fg_hex_name'
) + 'The fluegas heat exchanger with the name `{0}` was not ' 'found.'.format(
kwds['fg_hex_name']
)
assert kwds['fg_hex_name'] in self._models.parts, err_hex3
self._fg_hex_name = kwds['fg_hex_name']
# save ref to part:
self._fg_hex = self._models.parts[self._fg_hex_name]
# save view to fg hex gas volume flow cell to save values to
# (in Nm3/s):
self._fg_hex_gdv = self._fg_hex._dm_io[slice(1, 2)]
# set flow channel in this part to solved:
self._models.parts[self._fg_hex_name]._solved_ports.extend(
['fluegas_in', 'fluegas_out']
)
else: # fill values with dummies if no hex connected
self._fg_hex_gdv = np.zeros(1, dtype=np.float64)
# get flue gas flow:
assert isinstance(kwds['fluegas_flow'], (int, float)), (
self._base_err
+ self._arg_err.format('fluegas_flow')
+ self._aaei['fluegas_flow']
)
# specific flue gas flow in Nm3/kWh
self.fluegas_flow_specific = kwds['fluegas_flow']
# save in Nm3/s
# self._fg_dv_at_pmax = kwds['fluegas_flow'] / 3600
self._fg_dv_at_pmax = (
self.chp_power_gas * self.fluegas_flow_specific / 3.6e6
)
assert 0 <= self._lims[0] < self._lims[1], (
self._base_err
+ self._arg_err.format('lower_limit, upper_limit')
+ ' For a CHP plant limits are additionally restricted '
'to `0 <= lower_limit < upper_limit`.'
)
# get maximum gas power at modulation == 1:
self._Pgas_max = (
_pf.chp_gas_power(1.0) * self.chp_power_el * self._eta_el_fac
)
def _special_array_init(self, num_steps):
self.res_dQ = np.zeros((num_steps + 1,) + self._dQ_heating.shape)
self.res_Pel = np.zeros((num_steps + 1,) + self._Pel.shape)
self.res_Pgas = np.zeros((num_steps + 1,) + self._Pgas.shape)
self._chp_state = np.zeros((num_steps + 1,) + self._chp_state.shape)
def __deprecated_special_free_memory(
self, disk_store, part, array_length, hdf_idx, stepnum
):
disk_store['store_tmp'].append(
part + '/heating',
pd.DataFrame(data=self.res_dQ[:array_length, ...], index=hdf_idx),
)
disk_store['store_tmp'].append(
part + '/Pel',
pd.DataFrame(data=self.res_Pel[:array_length, ...], index=hdf_idx),
)
disk_store['store_tmp'].append(
part + '/Pgas',
pd.DataFrame(
data=self.res_Pgas[:array_length, ...], index=hdf_idx
),
)
disk_store['store_tmp'].append(
part + '/chp_state',
pd.DataFrame(
data=self._chp_state[:array_length, ...], index=hdf_idx
),
)
# set current result to row 0 of array and clear the rest:
self.res_dQ[0, ...] = self.res_dQ[stepnum[0], ...]
self.res_dQ[1:, ...] = 0.0
self.res_Pel[0, ...] = self.res_Pel[stepnum[0], ...]
self.res_Pel[1:, ...] = 0.0
self.res_Pgas[0, ...] = self.res_Pgas[stepnum[0], ...]
self.res_Pgas[1:, ...] = 0.0
self._chp_state[0, ...] = self._chp_state[stepnum[0], ...]
self._chp_state[1:, ...] = 0.0
def _special_free_memory(
self, disk_store, part, array_length, hdf_idx, stepnum
):
disk_store.append(
part + '/heating',
| pd.DataFrame(data=self.res_dQ[:array_length, ...], index=hdf_idx) | pandas.DataFrame |
import pandas as pd
from recalibrate.unarycalibration.singelsystematiccalibration import single_systematic_calibration
from pprint import pprint
from sklearn.metrics import brier_score_loss
from xgboost import XGBClassifier
# Illustrates calibration of a single set of model probabilities (user selecting a product)
if __name__=='__main__':
df_big = | pd.read_csv('https://raw.githubusercontent.com/microprediction/recalibrate/main/examples/default_data/default.csv') | pandas.read_csv |
"""Step 1: Solving the problem in a deterministic manner."""
import cvxpy as cp
import fledge
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import shutil
def main():
# Settings.
scenario_name = 'course_project_step_1'
results_path = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'results', 'step_1')
run_primal = True
run_dual = True
run_kkt = True
# Clear / instantiate results directory.
try:
if os.path.isdir(results_path):
shutil.rmtree(results_path)
os.mkdir(results_path)
except PermissionError:
pass
# STEP 1.0: SETUP MODELS.
# Read scenario definition into FLEDGE.
# - Data directory from this repository is first added as additional data path.
fledge.config.config['paths']['additional_data'].append(
os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'data')
)
fledge.data_interface.recreate_database()
# Obtain data & models.
# Flexible loads.
der_model_set = fledge.der_models.DERModelSet(scenario_name)
# Thermal grid.
thermal_grid_model = fledge.thermal_grid_models.ThermalGridModel(scenario_name)
thermal_grid_model.cooling_plant_efficiency = 10.0 # Change model parameter to incentivize use of thermal grid.
thermal_power_flow_solution_reference = fledge.thermal_grid_models.ThermalPowerFlowSolution(thermal_grid_model)
linear_thermal_grid_model = (
fledge.thermal_grid_models.LinearThermalGridModel(thermal_grid_model, thermal_power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_head_vector_minimum = 1.5 * thermal_power_flow_solution_reference.node_head_vector
branch_flow_vector_maximum = 10.0 * thermal_power_flow_solution_reference.branch_flow_vector
# Electric grid.
electric_grid_model = fledge.electric_grid_models.ElectricGridModelDefault(scenario_name)
power_flow_solution_reference = fledge.electric_grid_models.PowerFlowSolutionFixedPoint(electric_grid_model)
linear_electric_grid_model = (
fledge.electric_grid_models.LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_voltage_magnitude_vector_minimum = 0.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
node_voltage_magnitude_vector_maximum = 1.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
branch_power_magnitude_vector_maximum = 10.0 * electric_grid_model.branch_power_vector_magnitude_reference
# Energy price.
price_data = fledge.data_interface.PriceData(scenario_name)
# Obtain time step index shorthands.
scenario_data = fledge.data_interface.ScenarioData(scenario_name)
timesteps = scenario_data.timesteps
timestep_interval_hours = (timesteps[1] - timesteps[0]) / pd.Timedelta('1h')
# Invert sign of losses.
# - Power values of loads are negative by convention. Hence, sign of losses should be negative for power balance.
# Thermal grid.
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= -1.0
linear_thermal_grid_model.thermal_power_flow_solution.pump_power *= -1.0
# Electric grid.
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= -1.0
linear_electric_grid_model.power_flow_solution.loss *= -1.0
# Apply base power / voltage scaling.
# - Scale values to avoid numerical issues.
    base_power = 1e6  # in W, i.e. 1 MW.
    base_voltage = 1e3  # in V, i.e. 1 kV.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
der_model.mapping_active_power_by_output *= 1 / base_power
der_model.mapping_reactive_power_by_output *= 1 / base_power
der_model.mapping_thermal_power_by_output *= 1 / base_power
# Thermal grid.
linear_thermal_grid_model.sensitivity_node_head_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= 1
# Electric grid.
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active *= base_power / base_voltage
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive *= base_power / base_voltage
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= 1
linear_electric_grid_model.power_flow_solution.der_power_vector *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_1 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_2 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.loss *= 1 / base_power
linear_electric_grid_model.power_flow_solution.node_voltage_vector *= 1 / base_voltage
# Limits.
node_voltage_magnitude_vector_minimum /= base_voltage
node_voltage_magnitude_vector_maximum /= base_voltage
branch_power_magnitude_vector_maximum /= base_power
# Energy price.
# - Conversion of price values from S$/kWh to S$/p.u. for convenience. Currency S$ is SGD.
# - Power values of loads are negative by convention. Hence, sign of price values is inverted here.
price_data.price_timeseries *= -1.0 * base_power / 1e3 * timestep_interval_hours
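    # Unit check for the conversion above (prices are assumed to start in
    # S$/kWh, see the comment above): S$/kWh * (base_power / 1e3) kW per p.u. *
    # timestep length in hours = S$ per p.u. power and timestep; the factor
    # -1.0 flips the sign to match the negative-load convention.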
# STEP 1.1: SOLVE PRIMAL PROBLEM.
if run_primal or run_kkt: # Primal constraints are also needed for KKT problem.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
primal_problem = fledge.utils.OptimizationProblem()
# Define variables.
# Flexible loads: State space vectors.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
primal_problem.state_vector = dict.fromkeys(der_model_set.flexible_der_names)
primal_problem.control_vector = dict.fromkeys(der_model_set.flexible_der_names)
primal_problem.output_vector = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
primal_problem.state_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].states)
))
)
primal_problem.control_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].controls)
))
)
primal_problem.output_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
# Flexible loads: Power vectors.
primal_problem.der_thermal_power_vector = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
primal_problem.der_active_power_vector = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
primal_problem.der_reactive_power_vector = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Source variables.
primal_problem.source_thermal_power = cp.Variable((len(timesteps), 1))
primal_problem.source_active_power = cp.Variable((len(timesteps), 1))
primal_problem.source_reactive_power = cp.Variable((len(timesteps), 1))
# Define constraints.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
# Initial state.
primal_problem.constraints.append(
primal_problem.state_vector[der_model.der_name][0, :]
==
der_model.state_vector_initial.values
)
# State equation.
primal_problem.constraints.append(
primal_problem.state_vector[der_model.der_name][1:, :]
==
cp.transpose(
der_model.state_matrix.values
@ cp.transpose(primal_problem.state_vector[der_model.der_name][:-1, :])
+ der_model.control_matrix.values
@ cp.transpose(primal_problem.control_vector[der_model.der_name][:-1, :])
+ der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.iloc[:-1, :].values)
)
)
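            # The constraint above is the discrete-time state equation
            # x(t+1) = A x(t) + B u(t) + E d(t), stacked over all timesteps
            # (transposes are needed because the CVXPY variables are laid out
            # as time x state).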
# Output equation.
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
==
cp.transpose(
der_model.state_output_matrix.values
@ cp.transpose(primal_problem.state_vector[der_model.der_name])
+ der_model.control_output_matrix.values
@ cp.transpose(primal_problem.control_vector[der_model.der_name])
+ der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
)
# Output limits.
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
>=
der_model.output_minimum_timeseries.values
)
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
<=
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
)
# Power mapping.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
primal_problem.constraints.append(
primal_problem.der_active_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_active_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
primal_problem.constraints.append(
primal_problem.der_reactive_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_reactive_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
primal_problem.constraints.append(
primal_problem.der_thermal_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_thermal_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
# Thermal grid.
# Node head limit.
primal_problem.constraints.append(
np.array([node_head_vector_minimum.ravel()])
<=
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
)
# Branch flow limit.
primal_problem.constraints.append(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
<=
np.array([branch_flow_vector_maximum.ravel()])
)
# Power balance.
primal_problem.constraints.append(
thermal_grid_model.cooling_plant_efficiency ** -1
* (
primal_problem.source_thermal_power
+ cp.sum(-1.0 * (
primal_problem.der_thermal_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
)
==
cp.transpose(
linear_thermal_grid_model.sensitivity_pump_power_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
)
# Electric grid.
# Voltage limits.
primal_problem.constraints.append(
np.array([node_voltage_magnitude_vector_minimum.ravel()])
<=
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([node_voltage_magnitude_vector_maximum.ravel()])
)
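        # The two constraints above bound the linearised voltage magnitude,
        # |v| ~= |v_ref| + S_v,p (p - p_ref) + S_v,q (q - q_ref), between the
        # minimum and maximum per-unit limits defined further up.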
# Branch flow limits.
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
# Power balance.
primal_problem.constraints.append(
primal_problem.source_active_power
+ cp.sum(-1.0 * (
primal_problem.der_active_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
==
np.real(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
primal_problem.constraints.append(
primal_problem.source_reactive_power
+ cp.sum(-1.0 * (
primal_problem.der_reactive_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
==
np.imag(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
# Define objective.
primal_problem.objective += (
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ primal_problem.source_thermal_power
* thermal_grid_model.cooling_plant_efficiency ** -1
)
primal_problem.objective += (
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ primal_problem.source_active_power
)
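        # Objective sketch: energy cost of the thermal source converted to
        # cooling-plant electric power via the plant efficiency (COP), plus the
        # cost of active power drawn at the electric grid source, both priced
        # with the source-node active-power price series.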
if run_primal:
# Solve problem.
fledge.utils.log_time('primal solution')
primal_problem.solve()
fledge.utils.log_time('primal solution')
# Obtain results.
# Flexible loads.
primal_state_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.states)
primal_control_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.controls)
primal_output_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
for der_name in der_model_set.flexible_der_names:
primal_state_vector.loc[:, (der_name, slice(None))] = (
primal_problem.state_vector[der_name].value
)
primal_control_vector.loc[:, (der_name, slice(None))] = (
primal_problem.control_vector[der_name].value
)
primal_output_vector.loc[:, (der_name, slice(None))] = (
primal_problem.output_vector[der_name].value
)
# Thermal grid.
primal_der_thermal_power_vector = (
pd.DataFrame(
primal_problem.der_thermal_power_vector.value,
columns=linear_thermal_grid_model.thermal_grid_model.ders,
index=timesteps
)
)
primal_source_thermal_power = (
pd.DataFrame(
primal_problem.source_thermal_power.value,
columns=['total'],
index=timesteps
)
)
# Electric grid.
primal_der_active_power_vector = (
pd.DataFrame(
primal_problem.der_active_power_vector.value,
columns=linear_electric_grid_model.electric_grid_model.ders,
index=timesteps
)
)
primal_der_reactive_power_vector = (
pd.DataFrame(
primal_problem.der_reactive_power_vector.value,
columns=linear_electric_grid_model.electric_grid_model.ders,
index=timesteps
)
)
primal_source_active_power = (
pd.DataFrame(
primal_problem.source_active_power.value,
columns=['total'],
index=timesteps
)
)
primal_source_reactive_power = (
pd.DataFrame(
primal_problem.source_reactive_power.value,
columns=['total'],
index=timesteps
)
)
# Additional results.
primal_node_head_vector = (
pd.DataFrame(
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
).value,
index=timesteps,
columns=thermal_grid_model.nodes
)
)
primal_branch_flow_vector = (
pd.DataFrame(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
).value,
index=timesteps,
columns=thermal_grid_model.branches
)
)
primal_node_voltage_vector = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
primal_branch_power_vector_1 = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.branches
)
)
primal_branch_power_vector_2 = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.branches
)
)
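        # Express the results in per-unit below: scale by the base values and divide by the
        # respective reference magnitudes of the thermal / electric grid models.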
primal_node_head_vector_per_unit = (
primal_node_head_vector
/ thermal_grid_model.node_head_vector_reference
)
primal_branch_flow_vector_per_unit = (
primal_branch_flow_vector
/ thermal_grid_model.branch_flow_vector_reference
)
primal_node_voltage_vector_per_unit = (
primal_node_voltage_vector * base_voltage
/ np.abs(electric_grid_model.node_voltage_vector_reference)
)
primal_branch_power_vector_1_per_unit = (
primal_branch_power_vector_1 * base_power
/ electric_grid_model.branch_power_vector_magnitude_reference
)
primal_branch_power_vector_2_per_unit = (
primal_branch_power_vector_2 * base_power
/ electric_grid_model.branch_power_vector_magnitude_reference
)
# Store results.
primal_state_vector.to_csv(os.path.join(results_path, 'primal_state_vector.csv'))
primal_control_vector.to_csv(os.path.join(results_path, 'primal_control_vector.csv'))
primal_output_vector.to_csv(os.path.join(results_path, 'primal_output_vector.csv'))
primal_der_thermal_power_vector.to_csv(os.path.join(results_path, 'primal_der_thermal_power_vector.csv'))
primal_source_thermal_power.to_csv(os.path.join(results_path, 'primal_source_thermal_power.csv'))
primal_der_active_power_vector.to_csv(os.path.join(results_path, 'primal_der_active_power_vector.csv'))
primal_der_reactive_power_vector.to_csv(os.path.join(results_path, 'primal_der_reactive_power_vector.csv'))
primal_source_active_power.to_csv(os.path.join(results_path, 'primal_source_active_power.csv'))
primal_source_reactive_power.to_csv(os.path.join(results_path, 'primal_source_reactive_power.csv'))
primal_node_head_vector.to_csv(os.path.join(results_path, 'primal_node_head_vector.csv'))
primal_branch_flow_vector.to_csv(os.path.join(results_path, 'primal_branch_flow_vector.csv'))
primal_node_voltage_vector.to_csv(os.path.join(results_path, 'primal_node_voltage_vector.csv'))
primal_branch_power_vector_1.to_csv(os.path.join(results_path, 'primal_branch_power_vector_1.csv'))
primal_branch_power_vector_2.to_csv(os.path.join(results_path, 'primal_branch_power_vector_2.csv'))
primal_node_head_vector_per_unit.to_csv(os.path.join(results_path, 'primal_node_head_vector_per_unit.csv'))
primal_branch_flow_vector_per_unit.to_csv(os.path.join(results_path, 'primal_branch_flow_vector_per_unit.csv'))
primal_node_voltage_vector_per_unit.to_csv(os.path.join(results_path, 'primal_node_voltage_vector_per_unit.csv'))
primal_branch_power_vector_1_per_unit.to_csv(os.path.join(results_path, 'primal_branch_power_vector_1_per_unit.csv'))
primal_branch_power_vector_2_per_unit.to_csv(os.path.join(results_path, 'primal_branch_power_vector_2_per_unit.csv'))
# Obtain variable count / dimensions.
primal_variable_count = (
sum(np.multiply(*primal_problem.state_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*primal_problem.control_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*primal_problem.output_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ np.multiply(*primal_problem.der_thermal_power_vector.shape)
+ np.multiply(*primal_problem.der_active_power_vector.shape)
+ np.multiply(*primal_problem.der_reactive_power_vector.shape)
+ np.multiply(*primal_problem.source_thermal_power.shape)
+ np.multiply(*primal_problem.source_active_power.shape)
+ np.multiply(*primal_problem.source_reactive_power.shape)
)
print(f"primal_variable_count = {primal_variable_count}")
# Print objective.
primal_objective = pd.Series(primal_problem.objective.value, index=['primal_objective'])
primal_objective.to_csv(os.path.join(results_path, 'primal_objective.csv'))
print(f"primal_objective = {primal_objective.values}")
# STEP 1.2: SOLVE DUAL PROBLEM.
    if run_dual or run_kkt: # Dual constraints are also needed for KKT problem.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
dual_problem = fledge.utils.OptimizationProblem()
# Define variables.
# Flexible loads: State space equations.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
dual_problem.lambda_initial_state_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.lambda_state_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.lambda_output_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.mu_output_minimum = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.mu_output_maximum = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
dual_problem.lambda_initial_state_equation[der_name] = (
cp.Variable((
1,
len(der_model_set.flexible_der_models[der_name].states)
))
)
dual_problem.lambda_state_equation[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps[:-1]),
len(der_model_set.flexible_der_models[der_name].states)
))
)
dual_problem.lambda_output_equation[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
dual_problem.mu_output_minimum[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
), nonneg=True)
)
dual_problem.mu_output_maximum[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
), nonneg=True)
)
# Flexible loads: Power equations.
dual_problem.lambda_thermal_power_equation = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
dual_problem.lambda_active_power_equation = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
dual_problem.lambda_reactive_power_equation = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Thermal grid.
dual_problem.mu_node_head_minium = (
cp.Variable((len(timesteps), len(thermal_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_branch_flow_maximum = (
cp.Variable((len(timesteps), len(thermal_grid_model.branches)), nonneg=True)
)
dual_problem.lambda_pump_power_equation = (
cp.Variable((len(timesteps), 1))
)
# Electric grid.
dual_problem.mu_node_voltage_magnitude_minimum = (
cp.Variable((len(timesteps), len(electric_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_node_voltage_magnitude_maximum = (
cp.Variable((len(timesteps), len(electric_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_branch_power_magnitude_maximum_1 = (
cp.Variable((len(timesteps), len(electric_grid_model.branches)), nonneg=True)
)
dual_problem.mu_branch_power_magnitude_maximum_2 = (
cp.Variable((len(timesteps), len(electric_grid_model.branches)), nonneg=True)
)
dual_problem.lambda_loss_active_equation = cp.Variable((len(timesteps), 1))
dual_problem.lambda_loss_reactive_equation = cp.Variable((len(timesteps), 1))
# Define constraints.
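        # The constraints below are the stationarity conditions of the Lagrangian: the partial
        # derivative of the Lagrangian with respect to each primal variable is set to zero
        # (one block per primal variable, as indicated by the comments).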
for der_model in der_model_set.flexible_der_models.values():
# Differential with respect to state vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_initial_state_equation[der_model.der_name]
- (
dual_problem.lambda_state_equation[der_model.der_name][:1, :]
@ der_model.state_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][:1, :]
@ der_model.state_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_state_equation[der_model.der_name][0:-1, :]
- (
dual_problem.lambda_state_equation[der_model.der_name][1:, :]
@ der_model.state_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][1:-1, :]
@ der_model.state_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_state_equation[der_model.der_name][-1:, :]
- (
dual_problem.lambda_output_equation[der_model.der_name][-1:, :]
@ der_model.state_output_matrix.values
)
)
)
# Differential with respect to control vector.
dual_problem.constraints.append(
0.0
==
(
- (
dual_problem.lambda_state_equation[der_model.der_name]
@ der_model.control_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][:-1, :]
@ der_model.control_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
- (
dual_problem.lambda_output_equation[der_model.der_name][-1:, :]
@ der_model.control_output_matrix.values
)
)
)
# Differential with respect to output vector.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_output_equation[der_model.der_name]
- dual_problem.mu_output_minimum[der_model.der_name]
+ dual_problem.mu_output_maximum[der_model.der_name]
- (
dual_problem.lambda_thermal_power_equation[:, [der_index]]
@ der_model.mapping_thermal_power_by_output.values
)
- (
dual_problem.lambda_active_power_equation[:, [der_index]]
@ der_model.mapping_active_power_by_output.values
)
- (
dual_problem.lambda_reactive_power_equation[:, [der_index]]
@ der_model.mapping_reactive_power_by_output.values
)
)
)
# Differential with respect to thermal power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_thermal_power_equation
- (
dual_problem.mu_node_head_minium
@ linear_thermal_grid_model.sensitivity_node_head_by_der_power
)
+ (
dual_problem.mu_branch_flow_maximum
@ linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
)
- (
dual_problem.lambda_pump_power_equation
@ (
thermal_grid_model.cooling_plant_efficiency ** -1
* np.ones(linear_thermal_grid_model.sensitivity_pump_power_by_der_power.shape)
+ linear_thermal_grid_model.sensitivity_pump_power_by_der_power
)
)
)
)
# Differential with respect to active power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_active_power_equation
- (
dual_problem.mu_node_voltage_magnitude_minimum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
)
+ (
dual_problem.mu_node_voltage_magnitude_maximum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_1
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_2
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
)
- (
dual_problem.lambda_loss_active_equation
@ (
np.ones(linear_electric_grid_model.sensitivity_loss_active_by_der_power_active.shape)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
)
)
- (
dual_problem.lambda_loss_reactive_equation
@ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
)
)
)
# Differential with respect to reactive power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_reactive_power_equation
- (
dual_problem.mu_node_voltage_magnitude_minimum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_node_voltage_magnitude_maximum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_1
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_2
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
)
- (
dual_problem.lambda_loss_active_equation
@ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
)
- (
dual_problem.lambda_loss_reactive_equation
@ (
np.ones(linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive.shape)
+ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
)
)
)
)
# Differential with respect to thermal source power.
dual_problem.constraints.append(
0.0
==
(
np.transpose([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
+ dual_problem.lambda_pump_power_equation
)
)
# Differential with respect to active source power.
dual_problem.constraints.append(
0.0
==
(
np.transpose([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
+ dual_problem.lambda_loss_active_equation
)
)
        # Differential with respect to reactive source power.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_loss_reactive_equation
)
)
if run_dual:
# Define objective.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_initial_state_equation[der_model.der_name],
np.array([der_model.state_vector_initial.values])
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_state_equation[der_model.der_name],
cp.transpose(
der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values[:-1, :])
)
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_output_equation[der_model.der_name],
cp.transpose(
der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_output_minimum[der_model.der_name],
der_model.output_minimum_timeseries.values
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.mu_output_maximum[der_model.der_name],
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
))
)
# Thermal grid.
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_head_minium,
(
np.array([node_head_vector_minimum])
# - node_head_vector_reference
# + (
# linear_thermal_grid_model.sensitivity_node_head_by_der_power
# @ der_thermal_power_vector_reference
# )
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_flow_maximum,
(
# - branch_flow_vector_reference
# + (
# linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
# @ der_thermal_power_vector_reference
# )
- 1.0
* np.array([branch_flow_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_pump_power_equation,
(
0.0
# - pump_power_reference
# + (
# linear_thermal_grid_model.sensitivity_pump_power_by_der_power
# @ der_thermal_power_vector_reference
# )
)
))
)
# Electric grid.
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_voltage_magnitude_minimum,
(
np.array([node_voltage_magnitude_vector_minimum])
- np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
+ np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_voltage_magnitude_maximum,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
- np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([node_voltage_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_power_magnitude_maximum_1,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([branch_power_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_power_magnitude_maximum_2,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([branch_power_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_loss_active_equation,
(
-1.0
* np.array([np.real(linear_electric_grid_model.power_flow_solution.loss)])
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_loss_reactive_equation,
(
-1.0
* np.array([np.imag(linear_electric_grid_model.power_flow_solution.loss)])
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
# Invert sign of objective for maximisation.
dual_problem.objective *= -1.0
# Solve problem.
fledge.utils.log_time('dual solution')
dual_problem.solve()
fledge.utils.log_time('dual solution')
# Obtain results.
# Flexible loads.
dual_lambda_initial_state_equation = pd.DataFrame(0.0, index=der_model_set.timesteps[:1], columns=der_model_set.states)
dual_lambda_state_equation = pd.DataFrame(0.0, index=der_model_set.timesteps[:-1], columns=der_model_set.states)
dual_lambda_output_equation = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
dual_mu_output_minimum = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
dual_mu_output_maximum = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
for der_name in der_model_set.flexible_der_names:
dual_lambda_initial_state_equation.loc[:, (der_name, slice(None))] = (
dual_problem.lambda_initial_state_equation[der_name].value
)
dual_lambda_state_equation.loc[:, (der_name, slice(None))] = (
dual_problem.lambda_state_equation[der_name].value
)
dual_lambda_output_equation.loc[:, (der_name, slice(None))] = (
dual_problem.lambda_output_equation[der_name].value
)
dual_mu_output_minimum.loc[:, (der_name, slice(None))] = (
dual_problem.mu_output_minimum[der_name].value
)
dual_mu_output_maximum.loc[:, (der_name, slice(None))] = (
dual_problem.mu_output_maximum[der_name].value
)
# Flexible loads: Power equations.
dual_lambda_thermal_power_equation = (
pd.DataFrame(
dual_problem.lambda_thermal_power_equation.value,
index=timesteps,
columns=thermal_grid_model.ders
)
)
dual_lambda_active_power_equation = (
pd.DataFrame(
dual_problem.lambda_active_power_equation.value,
index=timesteps,
columns=electric_grid_model.ders
)
)
dual_lambda_reactive_power_equation = (
pd.DataFrame(
dual_problem.lambda_reactive_power_equation.value,
index=timesteps,
columns=electric_grid_model.ders
)
)
# Thermal grid.
dual_mu_node_head_minium = (
pd.DataFrame(
dual_problem.mu_node_head_minium.value,
index=timesteps,
columns=thermal_grid_model.nodes
)
)
dual_mu_branch_flow_maximum = (
pd.DataFrame(
dual_problem.mu_branch_flow_maximum.value,
index=timesteps,
columns=thermal_grid_model.branches
)
)
dual_lambda_pump_power_equation = (
pd.DataFrame(
dual_problem.lambda_pump_power_equation.value,
index=timesteps,
columns=['total']
)
)
# Electric grid.
dual_mu_node_voltage_magnitude_minimum = (
pd.DataFrame(
dual_problem.mu_node_voltage_magnitude_minimum.value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
dual_mu_node_voltage_magnitude_maximum = (
pd.DataFrame(
dual_problem.mu_node_voltage_magnitude_maximum.value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
dual_mu_branch_power_magnitude_maximum_1 = (
pd.DataFrame(
dual_problem.mu_branch_power_magnitude_maximum_1.value,
index=timesteps,
columns=electric_grid_model.branches
)
)
dual_mu_branch_power_magnitude_maximum_2 = (
pd.DataFrame(
dual_problem.mu_branch_power_magnitude_maximum_2.value,
index=timesteps,
columns=electric_grid_model.branches
)
)
dual_lambda_loss_active_equation = (
pd.DataFrame(
dual_problem.lambda_loss_active_equation.value,
index=timesteps,
columns=['total']
)
)
dual_lambda_loss_reactive_equation = (
pd.DataFrame(
dual_problem.lambda_loss_reactive_equation.value,
index=timesteps,
columns=['total']
)
)
# Store results.
dual_lambda_initial_state_equation.to_csv(os.path.join(results_path, 'dual_lambda_initial_state_equation.csv'))
dual_lambda_state_equation.to_csv(os.path.join(results_path, 'dual_lambda_state_equation.csv'))
dual_lambda_output_equation.to_csv(os.path.join(results_path, 'dual_lambda_output_equation.csv'))
dual_mu_output_minimum.to_csv(os.path.join(results_path, 'dual_mu_output_minimum.csv'))
dual_mu_output_maximum.to_csv(os.path.join(results_path, 'dual_mu_output_maximum.csv'))
dual_lambda_thermal_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_thermal_power_equation.csv'))
dual_lambda_active_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_active_power_equation.csv'))
dual_lambda_reactive_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_reactive_power_equation.csv'))
dual_mu_node_head_minium.to_csv(os.path.join(results_path, 'dual_mu_node_head_minium.csv'))
dual_mu_branch_flow_maximum.to_csv(os.path.join(results_path, 'dual_mu_branch_flow_maximum.csv'))
dual_lambda_pump_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_pump_power_equation.csv'))
dual_mu_node_voltage_magnitude_minimum.to_csv(os.path.join(results_path, 'dual_mu_node_voltage_magnitude_minimum.csv'))
dual_mu_node_voltage_magnitude_maximum.to_csv(os.path.join(results_path, 'dual_mu_node_voltage_magnitude_maximum.csv'))
dual_mu_branch_power_magnitude_maximum_1.to_csv(os.path.join(results_path, 'dual_mu_branch_power_magnitude_maximum_1.csv'))
dual_mu_branch_power_magnitude_maximum_2.to_csv(os.path.join(results_path, 'dual_mu_branch_power_magnitude_maximum_2.csv'))
dual_lambda_loss_active_equation.to_csv(os.path.join(results_path, 'dual_lambda_loss_active_equation.csv'))
dual_lambda_loss_reactive_equation.to_csv(os.path.join(results_path, 'dual_lambda_loss_reactive_equation.csv'))
# Obtain variable count / dimensions.
dual_variable_count = (
sum(np.multiply(*dual_problem.lambda_initial_state_equation[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.lambda_state_equation[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.lambda_output_equation[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.mu_output_minimum[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.mu_output_maximum[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ np.multiply(*dual_problem.lambda_thermal_power_equation.shape)
+ np.multiply(*dual_problem.lambda_active_power_equation.shape)
+ np.multiply(*dual_problem.lambda_reactive_power_equation.shape)
+ np.multiply(*dual_problem.mu_node_head_minium.shape)
+ np.multiply(*dual_problem.mu_branch_flow_maximum.shape)
+ np.multiply(*dual_problem.lambda_pump_power_equation.shape)
+ np.multiply(*dual_problem.mu_node_voltage_magnitude_minimum.shape)
+ np.multiply(*dual_problem.mu_node_voltage_magnitude_maximum.shape)
+ np.multiply(*dual_problem.mu_branch_power_magnitude_maximum_1.shape)
+ np.multiply(*dual_problem.mu_branch_power_magnitude_maximum_2.shape)
+ np.multiply(*dual_problem.lambda_loss_active_equation.shape)
+ np.multiply(*dual_problem.lambda_loss_reactive_equation.shape)
)
print(f"dual_variable_count = {dual_variable_count}")
# Print objective.
dual_objective = pd.Series(-1.0 * dual_problem.objective.value, index=['dual_objective'])
dual_objective.to_csv(os.path.join(results_path, 'dual_objective.csv'))
print(f"dual_objective = {dual_objective.values}")
# STEP 1.3: SOLVE KKT CONDITIONS.
if run_kkt:
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
kkt_problem = fledge.utils.OptimizationProblem()
# Obtain primal and dual variables.
# - Since primal and dual variables are part of the KKT conditions, the previous definitions are recycled.
kkt_problem.state_vector = primal_problem.state_vector
kkt_problem.control_vector = primal_problem.control_vector
kkt_problem.output_vector = primal_problem.output_vector
kkt_problem.der_thermal_power_vector = primal_problem.der_thermal_power_vector
kkt_problem.der_active_power_vector = primal_problem.der_active_power_vector
kkt_problem.der_reactive_power_vector = primal_problem.der_reactive_power_vector
kkt_problem.source_thermal_power = primal_problem.source_thermal_power
kkt_problem.source_active_power = primal_problem.source_active_power
kkt_problem.source_reactive_power = primal_problem.source_reactive_power
kkt_problem.lambda_initial_state_equation = dual_problem.lambda_initial_state_equation
kkt_problem.lambda_state_equation = dual_problem.lambda_state_equation
kkt_problem.lambda_output_equation = dual_problem.lambda_output_equation
kkt_problem.mu_output_minimum = dual_problem.mu_output_minimum
kkt_problem.mu_output_maximum = dual_problem.mu_output_maximum
kkt_problem.lambda_thermal_power_equation = dual_problem.lambda_thermal_power_equation
kkt_problem.lambda_active_power_equation = dual_problem.lambda_active_power_equation
kkt_problem.lambda_reactive_power_equation = dual_problem.lambda_reactive_power_equation
kkt_problem.mu_node_head_minium = dual_problem.mu_node_head_minium
kkt_problem.mu_branch_flow_maximum = dual_problem.mu_branch_flow_maximum
kkt_problem.lambda_pump_power_equation = dual_problem.lambda_pump_power_equation
kkt_problem.mu_node_voltage_magnitude_minimum = dual_problem.mu_node_voltage_magnitude_minimum
kkt_problem.mu_node_voltage_magnitude_maximum = dual_problem.mu_node_voltage_magnitude_maximum
kkt_problem.mu_branch_power_magnitude_maximum_1 = dual_problem.mu_branch_power_magnitude_maximum_1
kkt_problem.mu_branch_power_magnitude_maximum_2 = dual_problem.mu_branch_power_magnitude_maximum_2
kkt_problem.lambda_loss_active_equation = dual_problem.lambda_loss_active_equation
kkt_problem.lambda_loss_reactive_equation = dual_problem.lambda_loss_reactive_equation
# Obtain primal and dual constraints.
# - Since primal and dual constraints are part of the KKT conditions, the previous definitions are recycled.
kkt_problem.constraints.extend(primal_problem.constraints)
kkt_problem.constraints.extend(dual_problem.constraints)
# Obtain primal and dual problem objective.
# - For testing / debugging only, since the KKT problem does not technically have any objective.
# kkt_problem.objective = primal_problem.objective
# kkt_problem.objective = dual_problem.objective
# Define complementarity binary variables.
kkt_problem.psi_output_minimum = dict.fromkeys(der_model_set.flexible_der_names)
kkt_problem.psi_output_maximum = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
kkt_problem.psi_output_minimum[der_name] = (
cp.Variable(kkt_problem.mu_output_minimum[der_name].shape, boolean=True)
)
kkt_problem.psi_output_maximum[der_name] = (
cp.Variable(kkt_problem.mu_output_maximum[der_name].shape, boolean=True)
)
kkt_problem.psi_node_head_minium = cp.Variable(kkt_problem.mu_node_head_minium.shape, boolean=True)
kkt_problem.psi_branch_flow_maximum = cp.Variable(kkt_problem.mu_branch_flow_maximum.shape, boolean=True)
kkt_problem.psi_node_voltage_magnitude_minimum = cp.Variable(kkt_problem.mu_node_voltage_magnitude_minimum.shape, boolean=True)
kkt_problem.psi_node_voltage_magnitude_maximum = cp.Variable(kkt_problem.mu_node_voltage_magnitude_maximum.shape, boolean=True)
kkt_problem.psi_branch_power_magnitude_maximum_1 = cp.Variable(kkt_problem.mu_branch_power_magnitude_maximum_1.shape, boolean=True)
kkt_problem.psi_branch_power_magnitude_maximum_2 = cp.Variable(kkt_problem.mu_branch_power_magnitude_maximum_2.shape, boolean=True)
# Define complementarity big M parameters.
# - Big M values are chosen based on expected order of magnitude of constraints from primal / dual solution.
kkt_problem.big_m_output_minimum = cp.Parameter(value=2e4)
kkt_problem.big_m_output_maximum = cp.Parameter(value=2e4)
kkt_problem.big_m_node_head_minium = cp.Parameter(value=1e2)
kkt_problem.big_m_branch_flow_maximum = cp.Parameter(value=1e3)
kkt_problem.big_m_node_voltage_magnitude_minimum = cp.Parameter(value=1e2)
kkt_problem.big_m_node_voltage_magnitude_maximum = cp.Parameter(value=1e2)
kkt_problem.big_m_branch_power_magnitude_maximum_1 = cp.Parameter(value=1e3)
kkt_problem.big_m_branch_power_magnitude_maximum_2 = cp.Parameter(value=1e3)
# Define complementarity constraints.
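        # The complementary slackness conditions (mu >= 0, g(x) <= 0, mu * g(x) = 0) are
        # linearised with the big-M method, using one binary psi per constraint:
        #     -g(x) <= psi * M    and    mu <= (1 - psi) * M
        # so psi = 0 forces the constraint to be binding (g(x) = 0) while the multiplier stays
        # free, and psi = 1 forces mu = 0 while the constraint may have slack up to M.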
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
# Output limits.
kkt_problem.constraints.append(
-1.0
* (
der_model.output_minimum_timeseries.values
- kkt_problem.output_vector[der_model.der_name]
)
<=
kkt_problem.psi_output_minimum[der_model.der_name]
* kkt_problem.big_m_output_minimum
)
kkt_problem.constraints.append(
kkt_problem.mu_output_minimum[der_model.der_name]
<=
(1 - kkt_problem.psi_output_minimum[der_model.der_name])
* kkt_problem.big_m_output_minimum
)
kkt_problem.constraints.append(
-1.0
* (
kkt_problem.output_vector[der_model.der_name]
- der_model.output_maximum_timeseries.replace(np.inf, 1e4).values
)
<=
kkt_problem.psi_output_maximum[der_model.der_name]
* kkt_problem.big_m_output_maximum
)
kkt_problem.constraints.append(
kkt_problem.mu_output_maximum[der_model.der_name]
<=
(1 - kkt_problem.psi_output_maximum[der_model.der_name])
* kkt_problem.big_m_output_maximum
)
# Thermal grid.
# Node head limit.
kkt_problem.constraints.append(
-1.0
* (
np.array([node_head_vector_minimum.ravel()])
- cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(kkt_problem.der_thermal_power_vector)
)
)
<=
kkt_problem.psi_node_head_minium
* kkt_problem.big_m_node_head_minium
)
kkt_problem.constraints.append(
kkt_problem.mu_node_head_minium
<=
(1 - kkt_problem.psi_node_head_minium)
* kkt_problem.big_m_node_head_minium
)
# Branch flow limit.
kkt_problem.constraints.append(
-1.0
* (
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(kkt_problem.der_thermal_power_vector)
)
- np.array([branch_flow_vector_maximum.ravel()])
)
<=
kkt_problem.psi_branch_flow_maximum
* kkt_problem.big_m_branch_flow_maximum
)
kkt_problem.constraints.append(
kkt_problem.mu_branch_flow_maximum
<=
(1 - kkt_problem.psi_branch_flow_maximum)
* kkt_problem.big_m_branch_flow_maximum
)
# Voltage limits.
kkt_problem.constraints.append(
-1.0
* (
np.array([node_voltage_magnitude_vector_minimum.ravel()])
- np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
- cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
<=
kkt_problem.psi_node_voltage_magnitude_minimum
* kkt_problem.big_m_node_voltage_magnitude_minimum
)
kkt_problem.constraints.append(
kkt_problem.mu_node_voltage_magnitude_minimum
<=
(1 - kkt_problem.psi_node_voltage_magnitude_minimum)
* kkt_problem.big_m_node_voltage_magnitude_minimum
)
kkt_problem.constraints.append(
-1.0
* (
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
- np.array([node_voltage_magnitude_vector_maximum.ravel()])
)
<=
kkt_problem.psi_node_voltage_magnitude_maximum
* kkt_problem.big_m_node_voltage_magnitude_maximum
)
kkt_problem.constraints.append(
kkt_problem.mu_node_voltage_magnitude_maximum
<=
(1 - kkt_problem.psi_node_voltage_magnitude_maximum)
* kkt_problem.big_m_node_voltage_magnitude_maximum
)
# Branch flow limits.
kkt_problem.constraints.append(
-1.0
* (
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
- np.array([branch_power_magnitude_vector_maximum.ravel()])
)
<=
kkt_problem.psi_branch_power_magnitude_maximum_1
* kkt_problem.big_m_branch_power_magnitude_maximum_1
)
kkt_problem.constraints.append(
kkt_problem.mu_branch_power_magnitude_maximum_1
<=
(1 - kkt_problem.psi_branch_power_magnitude_maximum_1)
* kkt_problem.big_m_branch_power_magnitude_maximum_1
)
kkt_problem.constraints.append(
-1.0
* (
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
- np.array([branch_power_magnitude_vector_maximum.ravel()])
)
<=
kkt_problem.psi_branch_power_magnitude_maximum_2
* kkt_problem.big_m_branch_power_magnitude_maximum_2
)
kkt_problem.constraints.append(
kkt_problem.mu_branch_power_magnitude_maximum_2
<=
(1 - kkt_problem.psi_branch_power_magnitude_maximum_2)
* kkt_problem.big_m_branch_power_magnitude_maximum_2
)
# Solve problem.
fledge.utils.log_time('KKT solution')
kkt_problem.solve()
fledge.utils.log_time('KKT solution')
# Obtain results.
# Flexible loads.
kkt_state_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.states)
kkt_control_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.controls)
kkt_output_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
kkt_lambda_initial_state_equation = pd.DataFrame(0.0, index=der_model_set.timesteps[:1], columns=der_model_set.states)
kkt_lambda_state_equation = pd.DataFrame(0.0, index=der_model_set.timesteps[:-1], columns=der_model_set.states)
kkt_lambda_output_equation = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
        kkt_mu_output_minimum = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
import numpy as np
import pandas as pd
import math
from elopackage.elo import Elo
from elopackage.player import Player
class ResultsTable:
def __init__(self, df):
"""
        df - pd DataFrame of tournament results
"""
# self.df = df.sort_values(by='match_date_dt', ascending=True)
self.df = df.copy(deep=True)
self.elo = Elo('test')
self.player_dict = {2000000: Player('dummy', 2000000)}
self.dummy_tsid = 2000001
self.cold_start_threshold = 0
def add_players_to_dict(self, row, df_results, kfactor=None, sd=None):
"""
Adds all players from a row in the results table to player_dict, unless they already exist in player_dict
Also, assigns temp tsid to players with missing tsid. This is a unique value starting at 2,000,001
:param row: pd.Series - row of results df
:param df_results: pd DataFrame - results df
:param kfactor: float - kfactor to assign to player object
:param sd: float - sd to assign to player object
:return: None
"""
row = row._asdict()
if row['Doubles']:
col_headings = [t + p + "_tsid" for t in ['winning_team_', 'losing_team_'] for p in ['p1', 'p2']]
else:
col_headings = [t + p + "_tsid" for t in ['winning_team_', 'losing_team_'] for p in ['p1']]
tsids = [row[c] for c in col_headings]
names = [row[c.split('_tsid')[0]] for c in col_headings]
for ch, t, n in zip(col_headings, tsids, names):
if np.isnan(t):
self.player_dict[self.dummy_tsid] = Player(n, self.dummy_tsid, kfactor=kfactor, sd=sd)
df_results.at[row['Index'], ch] = self.dummy_tsid
self.dummy_tsid += 1
else:
if t not in self.player_dict:
self.player_dict[t] = Player(n, t, kfactor=kfactor, sd=sd)
def get_unique_players_in_category(self, category):
"""
        category - list (str) - tournament category, e.g. MS - <NAME>
"""
self.df = self.df[self.df['event_title'].isin(category)]
df_unique = pd.DataFrame()
for v in ['losing_team_p1', 'winning_team_p1']:
# Get unique players available from 2019 season
df_tmp = self.df[[f'{v}_tsid', v]].copy(deep=True)
df_tmp.drop_duplicates(subset=[f'{v}_tsid'], inplace=True)
df_tmp.columns = ['tsid', 'name']
            df_unique = pd.concat([df_unique, df_tmp])
import os, sys, platform, json, operator, multiprocessing, io, random, itertools, warnings, h5py, \
statistics, inspect, requests, validators, math, time, pprint, datetime, importlib, fsspec, scipy
# Python utils.
from textwrap import dedent
# External utils.
from tqdm import tqdm #progress bar.
from natsort import natsorted #file sorting.
import appdirs #os-agnostic folder.
# ORM.
from peewee import Model, CharField, IntegerField, BlobField, BooleanField, DateTimeField, ForeignKeyField
from playhouse.sqlite_ext import SqliteExtDatabase, JSONField
from playhouse.fields import PickleField
import dill as dill #complex serialization.
# ETL.
import pandas as pd
import numpy as np
from PIL import Image as Imaje
# Preprocessing & metrics.
import sklearn
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold #mandatory separate import.
from sklearn.feature_extraction.text import CountVectorizer
# Deep learning.
import keras
import torch
# Visualization.
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
from .configuration import setup_database, destroy_database, get_db
name = "aiqc"
"""
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
- 'fork' makes all variables on main process available to child process. OS attempts not to duplicate all variables.
- 'spawn' requires that variables be passed to child as args, and seems to play by pickle's rules (e.g. no func in func).
- In Python 3.8, macOS changed default from 'fork' to 'spawn' , which is how I learned all this.
- Windows does not support 'fork'. It supports 'spawn'. So basically I have to play by spawn/ pickle rules.
- Spawn/ pickle dictates (1) where execute_jobs func is placed, (2) if MetricsCutoff func works, (3) if tqdm output is visible.
- Update: now MetricsCutoff is not working in `fork` mode.
- Wrote the `poll_progress` func for 'spawn' situations.
- If everything hits the fan, `run_jobs(in_background=False)` for a normal for loop.
- Tried `concurrent.futures` but it only works with `.py` from command line.
"""
if (os.name != 'nt'):
# If `force=False`, then `importlib.reload(aiqc)` triggers `RuntimeError: context already set`.
multiprocessing.set_start_method('fork', force=True)
app_dir_no_trailing_slash = appdirs.user_data_dir("aiqc")
# Adds either a trailing slash or backslashes depending on OS.
app_dir = os.path.join(app_dir_no_trailing_slash, '')
default_config_path = app_dir + "config.json"
default_db_path = app_dir + "aiqc.sqlite3"
#==================================================
# CONFIGURATION
#==================================================
def setup():
setup_database([ File, Tabular, Image,
Dataset,
Label, Feature,
Splitset, Featureset, Foldset, Fold,
Encoderset, Labelcoder, Featurecoder,
Algorithm, Hyperparamset, Hyperparamcombo,
Queue, Jobset, Job, Predictor, Prediction,
FittedEncoderset, FittedLabelcoder,
Window
])
def destroy_db(confirm:bool=False, rebuild:bool=False):
destroy_database([ File, Tabular, Image,
Dataset,
Label, Feature,
Splitset, Featureset, Foldset, Fold,
Encoderset, Labelcoder, Featurecoder,
Algorithm, Hyperparamset, Hyperparamcombo,
Queue, Jobset, Job, Predictor, Prediction,
FittedEncoderset, FittedLabelcoder,
Window
], confirm, rebuild)
#==================================================
# ORM
#==================================================
# --------- GLOBALS ---------
categorical_encoders = [
'OneHotEncoder', 'LabelEncoder', 'OrdinalEncoder',
'Binarizer', 'LabelBinarizer', 'MultiLabelBinarizer'
]
# --------- HELPER FUNCTIONS ---------
def listify(supposed_lst:object=None):
"""
- When only providing a single element, it's easy to forget to put it inside a list!
"""
if (supposed_lst is not None):
if (not isinstance(supposed_lst, list)):
supposed_lst = [supposed_lst]
# If it was already a list, check it for emptiness and `None`.
elif (isinstance(supposed_lst, list)):
if (not supposed_lst):
raise ValueError("Yikes - The list you provided is empty.")
if (None in supposed_lst):
raise ValueError(dedent(
f"Yikes - The list you provided contained `None` as an element." \
f"{supposed_lst}"
))
# Allow `is None` to pass through because we need it to trigger null conditions.
return supposed_lst
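# Illustrative behaviour of `listify`: listify('a') -> ['a']; listify(['a', 'b']) -> ['a', 'b'];
# listify(None) -> None; an empty list, or a list containing None, raises a ValueError.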
def dill_serialize(objekt:object):
blob = io.BytesIO()
dill.dump(objekt, blob)
blob = blob.getvalue()
return blob
def dill_deserialize(blob:bytes):
objekt = io.BytesIO(blob)
objekt = dill.load(objekt)
return objekt
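# Illustrative round trip (hypothetical object): dill_deserialize(dill_serialize(lambda x: x + 1))
# returns a callable equivalent to the original, since dill can serialize lambdas and closures.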
def dill_reveal_code(serialized_objekt:object, print_it:bool=True):
code_str = (
dill.source.getsource(
dill_deserialize(serialized_objekt).__code__
)
)
if (print_it == True):
print(dedent(code_str))
return code_str
def torch_batcher(
features:object
, labels:object
, batch_size = 5
, enforce_sameSize:bool=False
, allow_1Sample:bool=False
):
	features = torch.split(features, batch_size)
	labels = torch.split(labels, batch_size)
	# Forward the batching options so that `enforce_sameSize` and `allow_1Sample` take effect.
	features = torch_drop_invalid_batchSize(features, batch_size=batch_size, enforce_sameSize=enforce_sameSize, allow_1Sample=allow_1Sample)
	labels = torch_drop_invalid_batchSize(labels, batch_size=batch_size, enforce_sameSize=enforce_sameSize, allow_1Sample=allow_1Sample)
	return features, labels
def torch_drop_invalid_batchSize(
batched_data:object
, batch_size = 5
, enforce_sameSize:bool=False
, allow_1Sample:bool=False
):
if (batch_size == 1):
print("\nWarning - `batch_size==1` can lead to errors.\nE.g. running BatchNormalization on a single sample.\n")
# Similar to a % remainder, this will only apply to the last element in the batch.
last_batch_size = batched_data[-1].shape[0]
if (
((allow_1Sample == False) and (last_batch_size == 1))
or
((enforce_sameSize == True) and (batched_data[0].shape[0] != last_batch_size))
):
# So if there is a problem, just trim the last split.
batched_data = batched_data[:-1]
return batched_data
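# Example: with batch_size=5 and 11 samples, torch.split yields batches of sizes 5, 5, 1;
# the trailing 1-sample batch is dropped unless allow_1Sample=True, and enforce_sameSize=True
# additionally drops any last batch smaller than the first one.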
def tf_batcher(features:object, labels:object, batch_size = 5):
"""
- `np.array_split` allows for subarrays to be of different sizes, which is rare.
https://numpy.org/doc/stable/reference/generated/numpy.array_split.html
- If there is a remainder, it will evenly distribute samples into the other arrays.
- Have not tested this with >= 3D data yet.
"""
rows_per_batch = math.ceil(features.shape[0]/batch_size)
batched_features = np.array_split(features, rows_per_batch)
batched_features = np.array(batched_features, dtype=object)
batched_labels = np.array_split(labels, rows_per_batch)
batched_labels = np.array(batched_labels, dtype=object)
return batched_features, batched_labels
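# Worked example: 7 samples with batch_size=5 give rows_per_batch = ceil(7/5) = 2, and
# np.array_split then produces batches of sizes 4 and 3 (note that `rows_per_batch` actually
# holds the number of batches, not the number of rows per batch).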
# --------- END HELPERS ---------
class BaseModel(Model):
"""
- Runs when the package is imported. http://docs.peewee-orm.com/en/latest/peewee/models.html
- ORM: by inheritting the BaseModel class, each Model class does not have to set Meta.
"""
class Meta:
database = get_db()
class Dataset(BaseModel):
"""
The sub-classes are not 1-1 tables. They simply provide namespacing for functions
to avoid functions riddled with if statements about dataset_type and null parameters.
"""
dataset_type = CharField() #tabular, image, sequence, graph, audio.
file_count = IntegerField() # only includes file_types that match the dataset_type.
source_path = CharField(null=True)
def make_label(id:int, columns:list):
columns = listify(columns)
l = Label.from_dataset(dataset_id=id, columns=columns)
return l
def make_feature(
id:int
, include_columns:list = None
, exclude_columns:list = None
):
include_columns = listify(include_columns)
exclude_columns = listify(exclude_columns)
feature = Feature.from_dataset(
dataset_id = id
, include_columns = include_columns
, exclude_columns = exclude_columns
)
return feature
def to_pandas(id:int, columns:list=None, samples:list=None):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (dataset.dataset_type == 'tabular'):
df = Dataset.Tabular.to_pandas(id=dataset.id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'text'):
df = Dataset.Text.to_pandas(id=dataset.id, columns=columns, samples=samples)
elif ((dataset.dataset_type == 'image') or (dataset.dataset_type == 'sequence')):
raise ValueError("\nYikes - `dataset_type={dataset.dataset_type}` does not have a `to_pandas()` method.\n")
return df
def to_numpy(id:int, columns:list=None, samples:list=None):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (dataset.dataset_type == 'tabular'):
arr = Dataset.Tabular.to_numpy(id=id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'image'):
if (columns is not None):
raise ValueError("\nYikes - `Dataset.Image.to_numpy` does not accept a `columns` argument.\n")
arr = Dataset.Image.to_numpy(id=id, samples=samples)
elif (dataset.dataset_type == 'text'):
arr = Dataset.Text.to_numpy(id=id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'sequence'):
arr = Dataset.Sequence.to_numpy(id=id, columns=columns, samples=samples)
return arr
def to_strings(id:int, samples:list=None):
dataset = Dataset.get_by_id(id)
samples = listify(samples)
if (dataset.dataset_type == 'tabular' or dataset.dataset_type == 'image'):
raise ValueError("\nYikes - This Dataset class does not have a `to_strings()` method.\n")
elif (dataset.dataset_type == 'text'):
return Dataset.Text.to_strings(id=dataset.id, samples=samples)
def sorted_file_list(dir_path:str):
if (not os.path.exists(dir_path)):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(dir_path)`:\n{dir_path}\n")
path = os.path.abspath(dir_path)
if (os.path.isdir(path) == False):
raise ValueError(f"\nYikes - The path that you provided is not a directory:{path}\n")
file_paths = os.listdir(path)
# prune hidden files and directories.
file_paths = [f for f in file_paths if not f.startswith('.')]
		file_paths = [f for f in file_paths if not os.path.isdir(os.path.join(path, f))]
if not file_paths:
raise ValueError(f"\nYikes - The directory that you provided has no files in it:{path}\n")
# folder path is already absolute
file_paths = [os.path.join(path, f) for f in file_paths]
file_paths = natsorted(file_paths)
return file_paths
def get_main_file(id:int):
dataset = Dataset.get_by_id(id)
if (dataset.dataset_type == 'image'):
raise ValueError("\n Dataset class does not support get_main_file() method for `image` data type,\n")
file = File.select().join(Dataset).where(
Dataset.id==id, File.file_type=='tabular', File.file_index==0
)[0]
return file
def get_main_tabular(id:int):
"""
Works on both `Dataset.Tabular`, `Dataset.Sequence`, and `Dataset.Text`
"""
file = Dataset.get_main_file(id)
return file.tabulars[0]
def arr_validate(ndarray):
if (type(ndarray).__name__ != 'ndarray'):
raise ValueError("\nYikes - The `ndarray` you provided is not of the type 'ndarray'.\n")
if (ndarray.dtype.names is not None):
raise ValueError(dedent("""
Yikes - Sorry, we do not support NumPy Structured Arrays.
However, you can use the `dtype` dict and `column_names` to handle each column specifically.
"""))
if (ndarray.size == 0):
raise ValueError("\nYikes - The ndarray you provided is empty: `ndarray.size == 0`.\n")
class Tabular():
"""
- Does not inherit the Dataset class e.g. `class Tabular(Dataset):`
because then ORM would make a separate table for it.
- It is just a collection of methods and default variables.
"""
dataset_type = 'tabular'
file_index = 0
file_count = 1
def from_path(
file_path:str
, source_file_format:str
, name:str = None
, dtype:object = None
, column_names:list = None
, skip_header_rows:object = 'infer'
, ingest:bool = True
):
column_names = listify(column_names)
accepted_formats = ['csv', 'tsv', 'parquet']
if (source_file_format not in accepted_formats):
raise ValueError(f"\nYikes - Available file formats include csv, tsv, and parquet.\nYour file format: {source_file_format}\n")
if (not os.path.exists(file_path)):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(file_path)`:\n{file_path}\n")
if (not os.path.isfile(file_path)):
raise ValueError(dedent(
f"Yikes - The path you provided is a directory according to `os.path.isfile(file_path)`:" \
f"{file_path}" \
f"But `dataset_type=='tabular'` only supports a single file, not an entire directory.`"
))
# Use the raw, not absolute path for the name.
if (name is None):
name = file_path
source_path = os.path.abspath(file_path)
dataset = Dataset.create(
dataset_type = Dataset.Tabular.dataset_type
, file_count = Dataset.Tabular.file_count
, source_path = source_path
, name = name
)
try:
File.Tabular.from_file(
path = file_path
, source_file_format = source_file_format
, dtype = dtype
, column_names = column_names
, skip_header_rows = skip_header_rows
, ingest = ingest
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_pandas(
dataframe:object
, name:str = None
, dtype:object = None
, column_names:list = None
):
column_names = listify(column_names)
if (type(dataframe).__name__ != 'DataFrame'):
raise ValueError("\nYikes - The `dataframe` you provided is not `type(dataframe).__name__ == 'DataFrame'`\n")
dataset = Dataset.create(
file_count = Dataset.Tabular.file_count
, dataset_type = Dataset.Tabular.dataset_type
, name = name
, source_path = None
)
try:
File.Tabular.from_pandas(
dataframe = dataframe
, dtype = dtype
, column_names = column_names
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_numpy(
ndarray:object
, name:str = None
, dtype:object = None
, column_names:list = None
):
column_names = listify(column_names)
Dataset.arr_validate(ndarray)
dimensions = len(ndarray.shape)
if (dimensions > 2) or (dimensions < 1):
raise ValueError(dedent(f"""
Yikes - Tabular Datasets only support 1D and 2D arrays.
Your array dimensions had <{dimensions}> dimensions.
"""))
dataset = Dataset.create(
file_count = Dataset.Tabular.file_count
, name = name
, source_path = None
, dataset_type = Dataset.Tabular.dataset_type
)
try:
File.Tabular.from_numpy(
ndarray = ndarray
, dtype = dtype
, column_names = column_names
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_pandas(
id:int
, columns:list = None
, samples:list = None
):
file = Dataset.get_main_file(id)#`id` belongs to dataset, not file
columns = listify(columns)
samples = listify(samples)
df = File.Tabular.to_pandas(id=file.id, samples=samples, columns=columns)
return df
def to_numpy(
id:int
, columns:list = None
, samples:list = None
):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
# This calls the method above. It does not need `.Tabular`
df = dataset.to_pandas(columns=columns, samples=samples)
ndarray = df.to_numpy()
return ndarray
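		# Typical usage (illustrative; names are examples): given `df = pd.DataFrame(...)`,
		# `dataset = Dataset.Tabular.from_pandas(dataframe=df)` ingests the data, and
		# `dataset.to_pandas()` / `dataset.to_numpy()` read it back, optionally filtered
		# by `columns` and `samples`.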
class Image():
dataset_type = 'image'
def from_folder(
folder_path:str
, name:str = None
, pillow_save:dict = {}
, ingest:bool = True
):
if ((pillow_save!={}) and (ingest==False)):
raise ValueError("\nYikes - `pillow_save` cannot be defined if `ingest==False`.\n")
if (name is None):
name = folder_path
source_path = os.path.abspath(folder_path)
file_paths = Dataset.sorted_file_list(source_path)
file_count = len(file_paths)
dataset = Dataset.create(
file_count = file_count
, name = name
, source_path = source_path
, dataset_type = Dataset.Image.dataset_type
)
#Make sure the shape and mode of each image are the same before writing the Dataset.
sizes = []
modes = []
for i, path in enumerate(tqdm(
file_paths
, desc = "🖼️ Validating Images 🖼️"
, ncols = 85
)):
img = Imaje.open(path)
sizes.append(img.size)
modes.append(img.mode)
if (len(set(sizes)) > 1):
dataset.delete_instance()# Orphaned.
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same width and height.
`PIL.Image.size`\nHere are the unique sizes you provided:\n{set(sizes)}
"""))
elif (len(set(modes)) > 1):
dataset.delete_instance()# Orphaned.
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same mode aka colorscale.
`PIL.Image.mode`\nHere are the unique modes you provided:\n{set(modes)}
"""))
try:
for i, p in enumerate(tqdm(
file_paths
, desc = "🖼️ Ingesting Images 🖼️"
, ncols = 85
)):
File.Image.from_file(
path = p
, pillow_save = pillow_save
, file_index = i
, ingest = ingest
, dataset_id = dataset.id
)
except:
dataset.delete_instance()
raise
return dataset
def from_urls(
urls:list
, pillow_save:dict = {}
, name:str = None
, source_path:str = None
, ingest:bool = True
):
if ((pillow_save!={}) and (ingest==False)):
raise ValueError("\nYikes - `pillow_save` cannot be defined if `ingest==False`.\n")
urls = listify(urls)
for u in urls:
validation = validators.url(u)
if (validation != True): #`== False` doesn't work.
raise ValueError(f"\nYikes - Invalid url detected within `urls` list:\n'{u}'\n")
file_count = len(urls)
dataset = Dataset.create(
file_count = file_count
, name = name
, dataset_type = Dataset.Image.dataset_type
, source_path = source_path
)
#Make sure the shape and mode of each image are the same before writing the Dataset.
sizes = []
modes = []
for i, url in enumerate(tqdm(
urls
, desc = "🖼️ Validating Images 🖼️"
, ncols = 85
)):
img = Imaje.open(
requests.get(url, stream=True).raw
)
sizes.append(img.size)
modes.append(img.mode)
if (len(set(sizes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same width and height.
`PIL.Image.size`\nHere are the unique sizes you provided:\n{set(sizes)}
"""))
elif (len(set(modes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same mode aka colorscale.
`PIL.Image.mode`\nHere are the unique modes you provided:\n{set(modes)}
"""))
try:
for i, url in enumerate(tqdm(
urls
, desc = "🖼️ Ingesting Images 🖼️"
, ncols = 85
)):
File.Image.from_url(
url = url
, pillow_save = pillow_save
, file_index = i
, ingest = ingest
, dataset_id = dataset.id
)
"""
for i, url in enumerate(urls):
file = File.Image.from_url(
url = url
, pillow_save = pillow_save
, file_index = i
, dataset_id = dataset.id
)
"""
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_pillow(id:int, samples:list=None):
"""
- This does not have a `columns` attribute because it is only for fetching images.
- Have to fetch as image before feeding into numpy `numpy.array(Image.open())`.
- Future: could return the tabular data along with it.
- Might need this for Preprocess, where images are rotated and otherwise transformed.
"""
samples = listify(samples)
files = Dataset.Image.get_image_files(id, samples=samples)
images = [f.Image.to_pillow(f.id) for f in files]
return images
def to_numpy(id:int, samples:list=None):
"""
- Because Pillow works directly with numpy, there's no need for pandas right now.
- But downstream methods are using pandas.
"""
samples = listify(samples)
images = Dataset.Image.to_pillow(id, samples=samples)
images = [np.array(img) for img in images]
images = np.array(images)
"""
- Pixel values range from 0-255.
- `np.set_printoptions(threshold=99999)` to inspect for yourself.
- It will look like some are all 0, but that's just the black edges.
"""
images = images/255
return images
def get_image_files(id:int, samples:list=None):
samples = listify(samples)
files = File.select().join(Dataset).where(
Dataset.id==id, File.file_type=='image'
).order_by(File.file_index)# Ascending by default.
# Select from list by index.
if (samples is not None):
files = [files[i] for i in samples]
return files
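# Illustrative sketch of the image pipeline above (the folder path is made up; every image in
# the folder must share one size and mode):
#   d = Dataset.Image.from_folder(folder_path='/tmp/pngs', name='toy_images')
#   arr = Dataset.Image.to_numpy(d.id)   # float array scaled to 0-1, shape (file_count, height, width[, channels])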
class Text():
dataset_type = 'text'
file_count = 1
column_name = 'TextData'
def from_strings(
strings: list,
name: str = None
):
for expectedString in strings:
if (not isinstance(expectedString, str)):
	raise ValueError(f'\nThe input contains an object of non-str type: {type(expectedString)}\n')
dataframe = pd.DataFrame(strings, columns=[Dataset.Text.column_name], dtype="object")
return Dataset.Text.from_pandas(dataframe, name)
def from_pandas(
dataframe:object,
name:str = None,
dtype:object = None,
column_names:list = None
):
if Dataset.Text.column_name not in list(dataframe.columns):
raise ValueError("\nYikes - The `dataframe` you provided doesn't contain 'TextData' column. Please rename the column containing text data to 'TextData'`\n")
if dataframe[Dataset.Text.column_name].dtypes != 'O':
raise ValueError("\nYikes - The `dataframe` you provided contains 'TextData' column with incorrect dtype: column dtype != object\n")
dataset = Dataset.Tabular.from_pandas(dataframe, name, dtype, column_names)
dataset.dataset_type = Dataset.Text.dataset_type
dataset.save()
return dataset
def from_path(
file_path:str
, source_file_format:str
, name:str = None
, dtype:object = None
, column_names:list = None
, skip_header_rows:object = 'infer'
):
dataset = Dataset.Tabular.from_path(file_path, source_file_format, name, dtype, column_names, skip_header_rows)
dataset.dataset_type = Dataset.Text.dataset_type
dataset.save()
return dataset
def from_folder(
folder_path:str,
name:str = None
):
if name is None:
name = folder_path
source_path = os.path.abspath(folder_path)
input_files = Dataset.sorted_file_list(source_path)
files_data = []
for input_file in input_files:
with open(input_file, 'r') as file_pointer:
files_data.extend([file_pointer.read()])
return Dataset.Text.from_strings(files_data, name)
def to_pandas(
id:int,
columns:list = None,
samples:list = None
):
df = Dataset.Tabular.to_pandas(id, columns, samples)
# `columns=None` fetches every column, which includes the text column.
if (columns is not None) and (Dataset.Text.column_name not in columns):
	return df
word_counts, feature_names = Dataset.Text.get_feature_matrix(df)
df = pd.DataFrame(word_counts.todense(), columns = feature_names)
return df
def to_numpy(
id:int,
columns:list = None,
samples:list = None
):
df = Dataset.Tabular.to_pandas(id, columns, samples)
# `columns=None` fetches every column, which includes the text column.
if (columns is not None) and (Dataset.Text.column_name not in columns):
	return df.to_numpy()
word_counts, feature_names = Dataset.Text.get_feature_matrix(df)
return word_counts.todense()
def get_feature_matrix(
dataframe:object
):
count_vect = CountVectorizer(max_features = 200)
word_counts = count_vect.fit_transform(dataframe[Dataset.Text.column_name].tolist())
return word_counts, count_vect.get_feature_names()
def to_strings(
id:int,
samples:list = None
):
data_df = Dataset.Tabular.to_pandas(id, [Dataset.Text.column_name], samples)
return data_df[Dataset.Text.column_name].tolist()
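# Illustrative sketch of the text pipeline above (the strings are made up). `to_pandas()`/`to_numpy()`
# return a bag-of-words count matrix built by `get_feature_matrix()` (CountVectorizer, max 200 features):
#   d = Dataset.Text.from_strings(strings=['the cat sat', 'the dog ran'], name='toy_text')
#   counts = Dataset.Text.to_pandas(d.id, columns=[Dataset.Text.column_name])   # one column per vocabulary word
#   raw = Dataset.Text.to_strings(d.id)                                         # ['the cat sat', 'the dog ran']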
class Sequence():
dataset_type = 'sequence'
def from_numpy(
ndarray3D_or_npyPath:object
, name:str = None
, dtype:object = None
, column_names:list = None
, ingest:bool = True
):
if ((ingest==False) and (isinstance(dtype, dict))):
raise ValueError("\nYikes - If `ingest==False` then `dtype` must be either a str or a single NumPy-based type.\n")
# Fetch array from .npy if it is not an in-memory array.
if (str(ndarray3D_or_npyPath.__class__) != "<class 'numpy.ndarray'>"):
if (not isinstance(ndarray3D_or_npyPath, str)):
raise ValueError("\nYikes - If `ndarray3D_or_npyPath` is not an array then it must be a string-based path.\n")
if (not os.path.exists(ndarray3D_or_npyPath)):
raise ValueError("\nYikes - The path you provided does not exist according to `os.path.exists(ndarray3D_or_npyPath)`\n")
if (not os.path.isfile(ndarray3D_or_npyPath)):
raise ValueError("\nYikes - The path you provided is not a file according to `os.path.isfile(ndarray3D_or_npyPath)`\n")
source_path = ndarray3D_or_npyPath
try:
# `allow_pickle=False` prevented it from reading the file.
ndarray_3D = np.load(file=ndarray3D_or_npyPath)
except:
print("\nYikes - Failed to `np.load(file=ndarray3D_or_npyPath)` with your `ndarray3D_or_npyPath`:\n")
print(f"{ndarray3D_or_npyPath}\n")
raise
elif (str(ndarray3D_or_npyPath.__class__) == "<class 'numpy.ndarray'>"):
source_path = None
ndarray_3D = ndarray3D_or_npyPath
column_names = listify(column_names)
Dataset.arr_validate(ndarray_3D)
dimensions = len(ndarray_3D.shape)
if (dimensions != 3):
raise ValueError(dedent(f"""
Yikes - Sequence Datasets can only be constructed from 3D arrays.
Your array dimensions had <{dimensions}> dimensions.
"""))
file_count = len(ndarray_3D)
dataset = Dataset.create(
file_count = file_count
, name = name
, dataset_type = Dataset.Sequence.dataset_type
, source_path = source_path
)
#Make sure the shape of each 2D array is the same before writing the Dataset.
shapes = []
for i, arr in enumerate(tqdm(
ndarray_3D
, desc = "⏱️ Validating Sequences 🧬"
, ncols = 85
)):
shapes.append(arr.shape)
if (len(set(shapes)) > 1):
dataset.delete_instance()# Orphaned.
raise ValueError(dedent(f"""
Yikes - All 2D arrays in the Dataset must be of the same shape.
`ndarray.shape`\nHere are the unique shapes you provided:\n{set(shapes)}
"""))
try:
for i, arr in enumerate(tqdm(
ndarray_3D
, desc = "⏱️ Ingesting Sequences 🧬"
, ncols = 85
)):
File.Tabular.from_numpy(
ndarray = arr
, dataset_id = dataset.id
, column_names = column_names
, dtype = dtype
, _file_index = i
, ingest = ingest
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_numpy(
id:int,
columns:list = None,
samples:list = None
):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (samples is None):
files = dataset.files
elif (samples is not None):
# Here the 'sample' is the entire file. Whereas, in 2D 'sample==row'.
# So run a query to get those files: `<<` means `in`.
files = File.select().join(Dataset).where(
Dataset.id==dataset.id, File.file_index<<samples
)
files = list(files)
# Then call them with the column filter.
# So don't pass `samples=samples` to the file.
list_2D = [f.to_numpy(columns=columns) for f in files]
arr_3D = np.array(list_2D)
return arr_3D
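# Illustrative sketch of the sequence pipeline above (array contents are made up):
#   import numpy as np
#   seqs = np.arange(24).reshape(3, 4, 2)   # 3 sequences, 4 timesteps, 2 columns
#   d = Dataset.Sequence.from_numpy(seqs, column_names=['x', 'y'])
#   back = Dataset.Sequence.to_numpy(d.id)  # shape (3, 4, 2) again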
# Graph
# handle nodes and edges as separate tabular types?
# node_data is pretty much tabular sequence (varied length) data right down to the columns.
# the only unique thing is an edge_data for each Graph file.
# attach multiple file types to a file File(id=1).tabular, File(id=1).graph?
class File(BaseModel):
"""
- Due to the fact that different types of Files have different attributes
(e.g. File.Tabular columns=JSON or File.Graph nodes=Blob, edges=Blob),
I am making each file type its own subclass and 1-1 table. This approach
allows for the creation of custom File types.
- If `blob=None` then the File isn't persisted, so it is fetched from source_path or s3_path.
- Note that `dtype` does not require every column to be included as a key in the dictionary.
"""
file_type = CharField()
file_format = CharField() # png, jpg, parquet.
file_index = IntegerField() # image, sequence, graph.
shape = JSONField()
is_ingested = BooleanField()
skip_header_rows = PickleField(null=True) #Image does not have.
source_path = CharField(null=True) # when `from_numpy` or `from_pandas`.
blob = BlobField(null=True) # when `is_ingested==False`.
dataset = ForeignKeyField(Dataset, backref='files')
"""
Classes are much cleaner than a knot of if statements in every method,
and `=None` for every parameter.
"""
def to_numpy(id:int, columns:list=None, samples:list=None):
file = File.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (file.file_type == 'tabular'):
arr = File.Tabular.to_numpy(id=id, columns=columns, samples=samples)
elif (file.file_type == 'image'):
arr = File.Image.to_numpy(id=id, columns=columns, samples=samples)
return arr
class Tabular():
file_type = 'tabular'
def from_pandas(
dataframe:object
, dataset_id:int
, dtype:object = None # Accepts a single str for the entire df, but ultimately it gets saved as one dtype per column.
, column_names:list = None
, source_path:str = None # passed in via from_file, but not from_numpy.
, ingest:bool = True # from_file() method overwrites this.
, file_format:str = 'parquet' # from_file() method overwrites this.
, skip_header_rows:int = 'infer'
, _file_index:int = 0 # Dataset.Sequence overwrites this.
):
column_names = listify(column_names)
File.Tabular.df_validate(dataframe, column_names)
# We need this metadata whether ingested or not.
dataframe, columns, shape, dtype = File.Tabular.df_set_metadata(
dataframe=dataframe, column_names=column_names, dtype=dtype
)
if (ingest==True):
blob = File.Tabular.df_to_compressed_parquet_bytes(dataframe)
elif (ingest==False):
blob = None
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Tabular.file_type
, file_format = file_format
, file_index = _file_index
, shape = shape
, source_path = source_path
, skip_header_rows = skip_header_rows
, is_ingested = ingest
, dataset = dataset
)
try:
Tabular.create(
columns = columns
, dtypes = dtype
, file_id = file.id
)
except:
file.delete_instance() # Orphaned.
raise
return file
def from_numpy(
ndarray:object
, dataset_id:int
, column_names:list = None
, dtype:object = None #Or single string.
, _file_index:int = 0
, ingest:bool = True
):
column_names = listify(column_names)
"""
Only supporting homogeneous arrays because structured arrays are a pain
when it comes time to convert them to dataframes. It complained about
setting an index, scalar types, and dimensionality... yikes.
Homogeneous arrays keep the dtype in `arr.dtype==dtype('int64')`
Structured arrays keep column names in `arr.dtype.names==('ID', 'Ring')`
Per-column dtypes from a structured array: <https://stackoverflow.com/a/65224410/5739514>
"""
Dataset.arr_validate(ndarray)
"""
column_names and dict-based dtype will be handled by our `from_pandas()` method.
`pd.DataFrame` method only accepts a single dtype str, or infers if None.
"""
df = | pd.DataFrame(data=ndarray) | pandas.DataFrame |
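# A small sketch of the homogeneous-vs-structured distinction described in the docstring above
# (values are made up; int64 assumes a typical 64-bit platform):
#   import numpy as np
#   homog = np.array([[1, 2], [3, 4]])
#   homog.dtype                                                # dtype('int64')
#   struct = np.array([(1, 2.5)], dtype=[('ID', 'i8'), ('Ring', 'f8')])
#   struct.dtype.names                                         # ('ID', 'Ring')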
#!/usr/bin/env python
# coding: utf-8
# # Avatar : The Last Airbender
# ### Machine Learning and Analysis of the show
# In[1]:
from IPython.display import Image
Image (filename = "images (1).jpg")
# ## Introduction :
#
# **Avatar: The Last Airbender (Avatar: The Legend of Aang in some regions)** is an American animated television series created by <NAME> and <NAME>, with <NAME> as head writer. It aired on Nickelodeon for three seasons, from February 2005 to July 2008. Avatar is set in an Asiatic-like world in which some people can manipulate one of the four elements—water, earth, fire, or air—with telekinetic variants of the Chinese martial arts known as "bending". The only individual who can bend all four elements, the "Avatar", is responsible for maintaining harmony between the world's four nations, and serves as the bridge between the spirit world and the physical world. The show is presented in a style that combines anime with American cartoons, and relies on the imagery of mainly East Asian culture, with some South Asian, New World, and Inuit and Sireniki influences.
#
#
# The series is centered around the journey of 12-year-old Aang, the current Avatar and last survivor of his nation, the Air Nomads, along with his friends Sokka, Katara, and later Toph, as they strive to end the Fire Nation's war against the other nations of the world. It also follows the story of Zuko—the exiled prince of the Fire Nation, seeking to restore his lost honor by capturing Aang, accompanied by his wise uncle Iroh—and later, that of his ambitious sister Azula.
# In[2]:
import pandas as pd
import numpy as np
import plotly_express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from plotly.offline import init_notebook_mode
init_notebook_mode()
# In[3]:
data = pd.read_csv('avatar_data.csv')
series = | pd.read_csv('series_names.csv') | pandas.read_csv |
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines to compute and compare the "signatures" of objective functions. These are useful to make sure two different
studies were actually optimizing the same objective function (even if they say the same test case in the meta-data).
"""
import warnings
import numpy as np
import pandas as pd
import bayesmark.random_search as rs
# How many points to probe the function to get the signature
N_SUGGESTIONS = 5
def get_func_signature(f, api_config):
"""Get the function signature for an objective function in an experiment.
Parameters
----------
f : typing.Callable
The objective function we want to compute the signature of. This function must take inputs in the form of
``dict(str, object)`` with one dictionary key per variable, and provide `float` as the output.
api_config : dict(str, dict)
Configuration of the optimization variables. See API description.
Returns
-------
signature_x : list(dict(str, object)) of shape (n_suggest,)
The input locations probed on signature call.
signature_y : list(float) of shape (n_suggest,)
The objective function values at the inputs points. This is the real signature.
"""
# Make sure get same sequence on every call to be a signature
random = np.random.RandomState(0)
signature_x = rs.suggest_dict([], [], api_config, n_suggestions=N_SUGGESTIONS, random=random)
# For now, we only take the first output as the signature. We can generalize this later.
signature_y = [f(xx)[0] for xx in signature_x]
assert np.all(np.isfinite(signature_y)), "non-finite values found in signature for function"
return signature_x, signature_y
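# A minimal sketch of calling get_func_signature (the objective and api_config below are made up;
# the callable must return something indexable whose first element is the objective value):
#   api_config = {'x': {'type': 'real', 'space': 'linear', 'range': (-1.0, 1.0)}}
#   f = lambda params: (params['x'] ** 2,)
#   sig_x, sig_y = get_func_signature(f, api_config)   # len(sig_y) == N_SUGGESTIONS == 5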
def analyze_signatures(signatures):
"""Analyze function signatures from the experiment.
Parameters
----------
signatures : dict(str, list(list(float)))
The signatures should all be the same length, so it should be 2D array
like.
Returns
-------
sig_errs : :class:`pandas:pandas.DataFrame`
rows are test cases, columns are test points.
signatures_median : dict(str, list(float))
Median signature across all repetition per test case.
"""
sig_errs = {}
signatures_median = {}
for test_case, signature_y in signatures.items():
assert len(signature_y) > 0, "signature with no cases found"
assert np.all(np.isfinite(signature_y)), "non-finite values found in signature for function"
minval = np.min(signature_y, axis=0)
maxval = np.max(signature_y, axis=0)
if not np.allclose(minval, maxval):
# Arguably, the util should not raise the warning, and these should
# be raised on the outside, but let's do this for simplicity.
warnings.warn(
"Signature diverged on %s betwen %s and %s" % (test_case, str(minval), str(maxval)), RuntimeWarning
)
sig_errs[test_case] = maxval - minval
# ensure serializable using tolist
signatures_median[test_case] = np.median(signature_y, axis=0).tolist()
# Convert to pandas so it is easy to append margins with max; better for display.
# If we let the user convert to pandas instead, we wouldn't need a pandas dependency here.
sig_errs = | pd.DataFrame(sig_errs) | pandas.DataFrame |
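# A minimal sketch of analyze_signatures (numbers are made up): each test case maps to the
# signatures collected across repetitions, and the per-point median and divergence are computed:
#   sigs = {'branin': [[1.0, 2.0, 3.0], [1.0, 2.1, 3.0]]}
#   sig_errs, sig_median = analyze_signatures(sigs)   # emits a RuntimeWarning because point 2 diverges
#   sig_median['branin']                              # [1.0, 2.05, 3.0]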
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import sklearn
import json
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.simplefilter('ignore')
testset = pd.read_csv('./public/Python_Scripts/Test.csv', header=None)
dataset = | pd.read_csv('./public/Python_Scripts/Dataset.csv', header=None) | pandas.read_csv |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1, dupe.2, etc. Monkey-patch the columns.
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names is passed as the columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = | read_csv(path, index_col=0, encoding="UTF-8") | pandas.read_csv |
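# The round-trip pattern these tests exercise, in miniature (the file name is arbitrary):
#   import pandas as pd
#   df = pd.DataFrame({"c/\u03c3": [1, 2, 3]})
#   df.to_csv("tmp.csv", encoding="UTF-8")
#   back = pd.read_csv("tmp.csv", index_col=0, encoding="UTF-8")   # round-trips the unicode column name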
import numpy as np
import xarray as xr
import pandas as pd
import os
from collections import OrderedDict
# from astropy.time import Time
import logging
import copy
from typing import List, Dict, Union, Tuple
import pysagereader
class SAGEIILoaderV700(object):
"""
Class designed to load the v7.00 SAGE II spec and index files provided by NASA ADSC into python
Data files must be accessible by the users machine, and can be downloaded from:
https://eosweb.larc.nasa.gov/project/sage2/sage2_v7_table
Parameters
----------
data_folder
location of sage ii index and spec files.
output_format
format for the output data. If ``'xarray'`` the output is returned as an ``xarray.Dataset``.
If None the output is returned as a dictionary of numpy arrays.
**NOTE: the following options only apply to xarray output types**
species
Species to be returned in the output data. If None all species are returned. Options are
``aerosol``, ``ozone``, ``h2o``, and ``no2``. If more than one species is returned fields will be NaN-padded
where data is not available. ``species`` is only used if ``'xarray'`` is set as the ``output_data`` format,
otherwise it has no effect.
cf_names
If True then CF-1.7 naming conventions are used for the output_data when ``xarray`` is selected.
filter_aerosol
filter the aerosol using the cloud flag
filter_ozone
filter the ozone using the criteria recommended in the release notes
* Exclusion of all data points with an uncertainty estimate of 300% or greater
* Exclusion of all profiles with an uncertainty greater than 10% between 30 and 50 km
* Exclusion of all data points at altitude and below the occurrence of an aerosol extinction value of
greater than 0.006 km^-1
* Exclusion of all data points at altitude and below the occurrence of both the 525nm aerosol extinction
value exceeding 0.001 km^-1 and the 525/1020 extinction ratio falling below 1.4
* Exclusion of all data points below 35km an 200% or larger uncertainty estimate
enumerate_flags
expand the index and species flags to their boolean values.
normalize_percent_error
give the species error as percent rather than percent * 100
return_separate_flags
return the enumerated flags as a separate data array
Example
-------
>>> sage = SAGEIILoaderV700()
>>> sage.data_folder = 'path/to/data'
>>> data = sage.load_data('2004-1-1','2004-5-1')
In addition to the sage ii fields reported in the files, two additional time fields are provided
to allow for easier subsetting of the data.
``data['mjd']`` is a numpy array containing the modified julian date of each scan
``data['time']`` is a pandas time series object containing the time of each scan
"""
def __init__(self, data_folder: str=None, output_format: str='xarray', species: List[str]=('aerosol', 'h2o', 'no2', 'ozone', 'background'),
cf_names: bool=False, filter_aerosol: bool=False, filter_ozone: bool=False,
enumerate_flags: bool=False, normalize_percent_error: bool=False, return_separate_flags: bool=False):
if type(species) == str:
species = [species]
self.data_folder = data_folder # Type: str
self.version = '7.00'
self.index_file = 'SAGE_II_INDEX_'
self.spec_file = 'SAGE_II_SPEC_'
self.fill_value = np.nan
self.spec_format = self.get_spec_format()
self.index_format = self.get_index_format()
self.output_format = output_format
self.species = [s.lower() for s in species]
self.cf_names = cf_names
self.filter_aerosol = filter_aerosol
self.filter_ozone = filter_ozone
self.normalize_percent_error = normalize_percent_error
self.enumerate_flags = enumerate_flags
self.return_separate_flags = return_separate_flags
@staticmethod
def get_spec_format() -> Dict[str, Tuple[str, int]]:
"""
spec format taken from sg2_specinfo.pro provided in the v7.00 download
used for reading the binary data format
Returns
-------
Dict
Ordered dictionary of variables provided in the spec file. Each dictionary field contains a
tuple with the information (data type, number of data points). Ordering is important as the
sage ii binary files are read sequentially.
"""
spec = OrderedDict()
spec['Tan_Alt'] = ('float32', 8) # Subtangent Altitudes(km)
spec['Tan_Lat'] = ('float32', 8) # Subtangent Latitudes @ Tan_Alt(deg)
spec['Tan_Lon'] = ('float32', 8) # Subtangent Longitudes @ Tan_Alt(deg)
spec['NMC_Pres'] = ('float32', 140) # Gridded Pressure profile(mb)
spec['NMC_Temp'] = ('float32', 140) # Gridded Temperature profile(K)
spec['NMC_Dens'] = ('float32', 140) # Gridded Density profile(cm ^ (-3))
spec['NMC_Dens_Err'] = ('int16', 140) # Error in NMC_Dens( % * 1000)
spec['Trop_Height'] = ('float32', 1) # NMC Tropopause Height(km)
spec['Wavelength'] = ('float32', 7) # Wavelength of each channel(nm)
spec['O3'] = ('float32', 140) # O3 Density profile 0 - 70 Km(cm ^ (-3))
spec['NO2'] = ('float32', 100) # NO2 Density profile 0 - 50 Km(cm ^ (-3))
spec['H2O'] = ('float32', 100) # H2O Volume Mixing Ratio 0 - 50 Km(ppp)
spec['Ext386'] = ('float32', 80) # 386 nm Extinction 0 - 40 Km(1 / km)
spec['Ext452'] = ('float32', 80) # 452 nm Extinction 0 - 40 Km(1 / km)
spec['Ext525'] = ('float32', 80) # 525 nm Extinction 0 - 40 Km(1 / km)
spec['Ext1020'] = ('float32', 80) # 1020 nm Extinction 0 - 40 Km(1 / km)
spec['Density'] = ('float32', 140) # Calculated Density 0 - 70 Km(cm ^ (-3))
spec['SurfDen'] = ('float32', 80) # Aerosol surface area dens 0 - 40 km(um ^ 2 / cm ^ 3)
spec['Radius'] = ('float32', 80) # Aerosol effective radius 0 - 40 km(um)
spec['Dens_Mid_Atm'] = ('float32', 70) # Middle Atmosphere Density(cm ^ (-3))
spec['O3_Err'] = ('int16', 140) # Error in O3 density profile( % * 100)
spec['NO2_Err'] = ('int16', 100) # Error in NO2 density profile( % * 100)
spec['H2O_Err'] = ('int16', 100) # Error in H2O mixing ratio( % * 100)
spec['Ext386_Err'] = ('int16', 80) # Error in 386 nm Extinction( % * 100)
spec['Ext452_Err'] = ('int16', 80) # Error in 452 nm Extinction( % * 100)
spec['Ext525_Err'] = ('int16', 80) # Error in 525 nm Extinction( % * 100)
spec['Ext1020_Err'] = ('int16', 80) # Error in 1019 nm Extinction( % * 100)
spec['Density_Err'] = ('int16', 140) # Error in Density( % * 100)
spec['SurfDen_Err'] = ('int16', 80) # Error in surface area dens( % * 100)
spec['Radius_Err'] = ('int16', 80) # Error in aerosol radius( % * 100)
spec['Dens_Mid_Atm_Err'] = ('int16', 70) # Error in Middle Atm.Density( % * 100)
spec['InfVec'] = ('uint16', 140) # Informational Bit flags
return spec
@staticmethod
def get_index_format() -> Dict[str, Tuple[str, int]]:
"""
index format taken from sg2_indexinfo.pro provided in the v7.00 download
used for reading the binary data format
Returns
-------
Dict
an ordered dictionary of variables provided in the index file. Each dictionary
field contains a tuple with the information (data type, length). Ordering is
important as the sage ii binary files are read sequentially.
"""
info = OrderedDict()
info['num_prof'] = ('uint32', 1) # Number of profiles in these files
info['Met_Rev_Date'] = ('uint32', 1) # LaRC Met Model Revision Date(YYYYMMDD)
info['Driver_Rev'] = ('S1', 8) # LaRC Driver Version(e.g. 6.20)
info['Trans_Rev'] = ('S1', 8) # LaRC Transmission Version
info['Inv_Rev'] = ('S1', 8) # LaRC Inversion Version
info['Spec_Rev'] = ('S1', 8) # LaRC Inversion Version
info['Eph_File_Name'] = ('S1', 32) # Ephemeris data file name
info['Met_File_Name'] = ('S1', 32) # Meteorological data file name
info['Ref_File_Name'] = ('S1', 32) # Refraction data file name
info['Tran_File_Name'] = ('S1', 32) # Transmission data file name
info['Spec_File_Name'] = ('S1', 32) # Species profile file name
info['FillVal'] = ('float32', 1) # Fill value
# Altitude grid and range info
info['Grid_Size'] = ('float32', 1) # Altitude grid spacing(0.5 km)
info['Alt_Grid'] = ('float32', 200) # Geometric altitudes(0.5, 1.0, ..., 100.0 km)
info['Alt_Mid_Atm'] = ('float32', 70) # Middle atmosphere geometric altitudes
info['Range_Trans'] = ('float32', 2) # Transmission min & max altitudes[0.5, 100.]
info['Range_O3'] = ('float32', 2) # Ozone min & max altitudes[0.5, 70.0]
info['Range_NO2'] = ('float32', 2) # NO2 min & max altitudes[0.5, 50.0]
info['Range_H2O'] = ('float32', 2) # Water vapor min & max altitudes[0.5, 50.0]
info['Range_Ext'] = ('float32', 2) # Aerosol extinction min & max altitudes[0.5, 40.0]
info['Range_Dens'] = ('float32', 2) # Density min & max altitudes[0.5, 70.0]
info['Spare'] = ('float32', 2) #
# Event specific info useful for data subsetting
info['YYYYMMDD'] = ('int32', 930) # Event date at 20km subtangent point
info['Event_Num'] = ('int32', 930) # Event number
info['HHMMSS'] = ('int32', 930) # Event time at 20km
info['Day_Frac'] = ('float32', 930) # Time of year(DDD.frac) at 20 km
info['Lat'] = ('float32', 930) # Subtangent latitude at 20 km(-90, +90)
info['Lon'] = ('float32', 930) # Subtangent longitude at 20 km(-180, +180)
info['Beta'] = ('float32', 930) # Spacecraft beta angle(deg)
info['Duration'] = ('float32', 930) # Duration of event(sec)
info['Type_Sat'] = ('int16', 930) # Event Type Instrument(0 = SR, 1 = SS)
info['Type_Tan'] = ('int16', 930) # Event Type Local(0 = SR, 1 = SS)
# Process tracking and flag info
info['Dropped'] = ('int32', 930) # Dropped event flag
info['InfVec'] = ('uint32', 930) # Bit flags relating to processing (
# NOTE: readme_sage2_v6.20.txt says InfVec is 16 bit but appears to actually be 32 (also in IDL software)
# Record creation dates and times
info['Eph_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Eph_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Met_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Met_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Ref_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Ref_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Tran_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Tran_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Spec_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Spec_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
return info
def get_spec_filename(self, year: int, month: int) -> str:
"""
Returns the spec filename given a year and month
Parameters
----------
year
year of the data that will be loaded
month
month of the data that will be loaded
Returns
-------
filename of the spec file where the data is stored
"""
file = os.path.join(self.data_folder,
self.spec_file + str(int(year)) + str(int(month)).zfill(2) + '.' + self.version)
if not os.path.isfile(file):
file = None
return file
def get_index_filename(self, year: int, month: int) -> str:
"""
Returns the index filename given a year and month
Parameters
----------
year
year of the data that will be loaded
month
month of the data that will be loaded
Returns
-------
filename of the index file where the data is stored
"""
file = os.path.join(self.data_folder,
self.index_file + str(int(year)) + str(int(month)).zfill(2) + '.' + self.version)
if not os.path.isfile(file):
file = None
return file
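# For example (assuming the files exist under `data_folder`), January 2004 resolves to:
#   self.get_index_filename(2004, 1)  ->  <data_folder>/SAGE_II_INDEX_200401.7.00
#   self.get_spec_filename(2004, 1)   ->  <data_folder>/SAGE_II_SPEC_200401.7.00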
def read_spec_file(self, file: str, num_profiles: int) -> List[Dict]:
"""
Parameters
----------
file
name of the spec file to be read
num_profiles
number of profiles to read from the spec file (usually determined from the index file)
Returns
-------
list of dictionaries containing the spec data. Each list is one event
"""
# load the file into the buffer
file_format = self.spec_format
with open(file, "rb") as f:
buffer = f.read()
# initialize the list of dictionaries
data = [None] * num_profiles
for p in range(num_profiles):
data[p] = dict()
# load the data from the buffer
bidx = 0
for p in range(num_profiles):
for key in file_format.keys():
nbytes = np.dtype(file_format[key][0]).itemsize * file_format[key][1]
data[p][key] = copy.copy(np.frombuffer(buffer[bidx:bidx+nbytes],
dtype=file_format[key][0]))
bidx += nbytes
return data
def read_index_file(self, file: str) -> Dict:
"""
Read the binary file into a python data structure
Parameters
----------
file
filename to be read
Returns
-------
data from the file
"""
file_format = self.index_format
with open(file, "rb") as f:
buffer = f.read()
data = dict()
# load the data from file into a list
bidx = 0
for key in file_format.keys():
nbytes = np.dtype(file_format[key][0]).itemsize * file_format[key][1]
if file_format[key][0] == 'S1':
data[key] = copy.copy(buffer[bidx:bidx + nbytes].decode('utf-8'))
else:
data[key] = copy.copy(np.frombuffer(buffer[bidx:bidx + nbytes], dtype=file_format[key][0]))
if len(data[key]) == 1:
data[key] = data[key][0]
bidx += nbytes
# make a more useable time field
date_str = []
# If the time overflows by less than the scan time just set it to midnight
data['HHMMSS'][(data['HHMMSS'] >= 240000) & (data['HHMMSS'] < (240000 + data['Duration']))] = 235959
# otherwise, set it as invalid
data['HHMMSS'][data['HHMMSS'] >= 240000] = -999
for idx, (ymd, hms) in enumerate(zip(data['YYYYMMDD'], data['HHMMSS'])):
if (ymd < 0) | (hms < 0):
date_str.append('1970-1-1 00:00:00') # invalid sage ii date
else:
hours = int(hms/10000)
mins = int((hms % 10000)/100)
secs = hms % 100
date_str.append(str(ymd)[0:4] + '-' + str(ymd)[4:6].zfill(2) + '-' +
str(ymd)[6::].zfill(2) + ' ' + str(hours).zfill(2) + ':' +
str(mins).zfill(2) + ':' + str(secs).zfill(2))
# data['time'] = Time(date_str, format='iso')
data['time'] = pd.to_datetime(date_str)
data['mjd'] = np.array((data['time'] - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D'))
data['mjd'][data['mjd'] < 40588] = -999 # get rid of invalid dates
return data
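# A small sketch of the MJD conversion used above (the date is made up): MJD counts days since
# 1858-11-17, so 2004-01-01 00:00 maps to 53005.0:
#   (pd.Timestamp('2004-1-1') - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D')   # 53005.0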
def load_data(self, min_date: str, max_date: str,
min_lat: float=-90, max_lat: float=90,
min_lon: float=-180, max_lon: float=360) -> Union[Dict, xr.Dataset]:
"""
Load the SAGE II data for the specified dates and locations.
Parameters
----------
min_date
start date where data will be loaded in iso format, eg: '2004-1-1'
max_date
end date where data will be loaded in iso format, eg: '2004-1-1'
min_lat
minimum latitude (optional)
max_lat
maximum latitude (optional)
min_lon
minimum longitude (optional)
max_lon
maximum longitude (optional)
Returns
-------
Variables are returned as numpy arrays (1 or 2 dimensional depending on the variable)
"""
min_time = pd.Timestamp(min_date)
max_time = pd.Timestamp(max_date)
data = dict()
init = False
# create a list of unique year/month combinations between the start/end dates
uniq = OrderedDict()
for year in [(t.date().year, t.date().month) for t in
pd.date_range(min_time, max_time+pd.Timedelta(27, 'D'), freq='27D')]:
uniq[year] = year
# load in the data from the desired months
for (year, month) in list(uniq.values()):
logging.info('loading data for : ' + str(year) + '/' + str(month))
indx_file = self.get_index_filename(year, month)
# if the file does not exist move on to the next month
if indx_file is None:
continue
indx_data = self.read_index_file(indx_file)
numprof = indx_data['num_prof']
spec_data = self.read_spec_file(self.get_spec_filename(year, month), numprof)
# get rid of the duplicate names for InfVec
for sp in spec_data:
sp['ProfileInfVec'] = copy.copy(sp['InfVec'])
del sp['InfVec']
for key in indx_data.keys():
# get rid of extraneous profiles in the index so index and spec are the same lengths
if hasattr(indx_data[key], '__len__'):
indx_data[key] = np.delete(indx_data[key], np.arange(numprof, 930))
# add the index values to the data set
if key in data.keys():
# we dont want to replicate certain fields
if (key[0:3] != 'Alt') & (key[0:5] != 'Range') & (key[0:7] != 'FillVal'):
data[key] = np.append(data[key], indx_data[key])
else:
if key == 'FillVal':
data[key] = indx_data[key]
else:
data[key] = [indx_data[key]]
# initialize the data dictionaries as lists
if init is False:
for key in spec_data[0].keys():
data[key] = []
init = True
# add the spec values to the data set
for key in spec_data[0].keys():
data[key].append(np.asarray([sp[key] for sp in spec_data]))
# join all of our lists into an array - this could be done more elegantly with vstack to avoid
# the temporary lists, but this is much faster
for key in data.keys():
if key == 'FillVal':
data[key] = float(data[key]) # make this a simple float rather than zero dimensional array
elif len(data[key][0].shape) > 0:
data[key] = np.concatenate(data[key], axis=0)
else:
data[key] = np.asarray(data[key])
data = self.subset_data(data, min_date, max_date, min_lat, max_lat, min_lon, max_lon)
if not data:
return None
if self.output_format == 'xarray':
data = self.convert_to_xarray(data)
return data
@staticmethod
def subset_data(data: Dict, min_date: str, max_date: str,
min_lat: float, max_lat: float,
min_lon: float, max_lon: float) -> Dict:
"""
Removes any data from the dictionary that does not meet the specified time, latitude and longitude requirements.
Parameters
----------
data
dictionary of sage ii data. Must have the fields 'mjd', 'Lat' and 'Lon'. All others are optional
min_date
start date where data will be loaded in iso format, eg: '2004-1-1'
max_date
end date where data will be loaded in iso format, eg: '2004-1-1'
min_lat
minimum latitude (optional)
max_lat
maximum latitude (optional)
min_lon
minimum longitude (optional)
max_lon
maximum longitude (optional)
Returns
-------
returns the dictionary with only data in the valid latitude, longitude and time range
"""
min_mjd = (pd.Timestamp(min_date) - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D')
max_mjd = (pd.Timestamp(max_date) - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D')
good = (data['mjd'] > min_mjd) & (data['mjd'] < max_mjd) & \
(data['Lat'] > min_lat) & (data['Lat'] < max_lat) & \
(data['Lon'] > min_lon) & (data['Lon'] < max_lon)
if np.any(good):
for key in data.keys():
if hasattr(data[key], '__len__'):
if data[key].shape[0] == len(good):
data[key] = data[key][good]
else:
print('no data satisfies the criteria')
data = {}
return data
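    # Added note (illustrative, not from the original file): 'mjd' is days since the
    # 1858-11-17 epoch, so the bounds above can be checked by hand, e.g.
    #   >>> (pd.Timestamp('2004-01-01') - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D')
    #   53005.0
    # A hypothetical call subset_data(data, '2004-1-1', '2004-2-1', -10, 10, 0, 360)
    # then keeps only profiles whose mjd/Lat/Lon fall inside that time/lat/lon box.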
def convert_to_xarray(self, data: Dict) -> Union[xr.Dataset, Tuple[xr.Dataset, xr.Dataset]]:
"""
Parameters
----------
data
Data from the ``load_data`` function
Returns
-------
data formatted to an xarray Dataset
"""
# split up the fields into one of different sizes and optional returns
fields = dict()
# not currently returned
fields['geometry'] = ['Tan_Alt', 'Tan_Lat', 'Tan_Lon']
fields['flags'] = ['InfVec', 'Dropped']
fields['profile_flags'] = ['ProfileInfVec']
# always returned - 1 per profile
fields['general'] = ['Event_Num', 'Lat', 'Lon', 'Beta', 'Duration', 'Type_Sat', 'Type_Tan', 'Trop_Height']
# optional return parameters
fields['background'] = ['NMC_Pres', 'NMC_Temp', 'NMC_Dens', 'NMC_Dens_Err', 'Density', 'Density_Err']
fields['ozone'] = ['O3', 'O3_Err']
fields['no2'] = ['NO2', 'NO2_Err']
fields['h2o'] = ['H2O', 'H2O_Err']
fields['aerosol'] = ['Ext386', 'Ext452', 'Ext525', 'Ext1020', 'Ext386_Err', 'Ext452_Err', 'Ext525_Err',
'Ext1020_Err']
fields['particle_size'] = ['SurfDen', 'Radius', 'SurfDen_Err', 'Radius_Err']
xr_data = []
index_flags = self.convert_index_bit_flags(data)
species_flags = self.convert_species_bit_flags(data)
time = pd.to_timedelta(data['mjd'], 'D') + pd.Timestamp('1858-11-17')
data['Trop_Height'] = data['Trop_Height'].flatten()
for key in fields['general']:
xr_data.append(xr.DataArray(data[key], coords=[time], dims=['time'], name=key))
if 'aerosol' in self.species or self.filter_ozone: # we need aerosol to filter ozone
altitude = data['Alt_Grid'][0:80]
wavel = np.array([386.0, 452.0, 525.0, 1020.0])
ext = np.array([data['Ext386'], data['Ext452'], data['Ext525'], data['Ext1020']])
xr_data.append(xr.DataArray(ext, coords=[wavel, time, altitude],
dims=['wavelength', 'time', 'Alt_Grid'], name='Ext'))
ext = np.array([data['Ext386_Err'], data['Ext452_Err'], data['Ext525_Err'], data['Ext1020_Err']])
xr_data.append(xr.DataArray(ext, coords=[wavel, time, altitude],
dims=['wavelength', 'time', 'Alt_Grid'], name='Ext_Err'))
for key in fields['particle_size']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if 'no2' in self.species:
altitude = data['Alt_Grid'][0:100]
for key in fields['no2']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if 'h2o' in self.species:
altitude = data['Alt_Grid'][0:100]
for key in fields['h2o']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if any(i in ['ozone', 'o3'] for i in self.species):
altitude = data['Alt_Grid'][0:140]
for key in fields['ozone']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if 'background' in self.species:
altitude = data['Alt_Grid'][0:140]
for key in fields['background']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
xr_data = xr.merge(xr_data)
if self.enumerate_flags:
xr_data = xr.merge([xr_data, index_flags, species_flags])
for var in xr_data.variables.keys():
if xr_data[var].dtype == 'float32' or 'Err' in var:
xr_data[var] = xr_data[var].where(xr_data[var] != data['FillVal'])
# determine cloud filter for aerosol data
cloud_filter = xr.full_like(species_flags.Cloud_Bit_1, fill_value=True, dtype=bool)
min_alt = (xr_data.Alt_Grid * (species_flags.Cloud_Bit_1 & species_flags.Cloud_Bit_2)).max(dim='Alt_Grid')
cloud_filter = cloud_filter.where(cloud_filter.Alt_Grid > min_alt)
xr_data['cloud_filter'] = np.isnan(cloud_filter)
# determine valid ozone altitudes
if any(i in ['ozone', 'o3'] for i in self.species):
# add an ozone filter field for convenience
ozone_good = xr.full_like(species_flags.Cloud_Bit_1, fill_value=True, dtype=bool)
# Exclusion of all data points with an uncertainty estimate of 300% or greater
ozone_good = ozone_good.where(xr_data.O3_Err < 30000)
# Exclusion of all profiles with an uncertainty greater than 10% between 30 and 50 km
no_good = (xr_data.O3_Err > 1000) & (xr_data.Alt_Grid > 30) & (xr_data.Alt_Grid < 50)
ozone_good = ozone_good.where(~no_good)
# Exclusion of all data points at altitude and below the occurrence of an aerosol extinction value of
# greater than 0.006 km^-1
# NOTE: the wavelength to use as the filter is not specified in the documentation, so I have chosen the
# wavelength with the smallest extinction and therefore the strictest filtering
min_alt = (xr_data.Alt_Grid * (xr_data.Ext.sel(wavelength=1020) > 0.006)).max(dim='Alt_Grid')
ozone_good = ozone_good.where(xr_data.Alt_Grid > min_alt)
# Exclusion of all data points at altitude and below the occurrence of both the 525nm aerosol extinction
# value exceeding 0.001 km^-1 and the 525/1020 extinction ratio falling below 1.4
min_alt = (xr_data.Alt_Grid * ((xr_data.Ext.sel(wavelength=525) > 0.001) &
((xr_data.Ext.sel(wavelength=525) / xr_data.Ext.sel(
wavelength=1020)) < 1.4))).max(dim='Alt_Grid')
ozone_good = ozone_good.where(xr_data.Alt_Grid > min_alt)
            # Exclusion of all data points below 35 km with a 200% or larger uncertainty estimate
no_good = (xr_data.O3_Err > 20000) & (xr_data.Alt_Grid < 35)
ozone_good = ~np.isnan(ozone_good.where(~no_good))
xr_data['ozone_filter'] = ozone_good
if self.filter_aerosol:
xr_data['Ext'] = xr_data.Ext.where(~xr_data.cloud_filter)
if self.filter_ozone:
xr_data['O3'] = xr_data.O3.where(ozone_good)
# drop aerosol if not requested
if self.filter_ozone and not ('aerosol' in self.species):
xr_data.drop(['Ext', 'Ext_Err', 'wavelength'])
if self.normalize_percent_error:
for var in xr_data.variables.keys():
if 'Err' in var: # put error units back into percent
xr_data[var] = (xr_data[var] / 100).astype('float32')
xr_data = xr_data.transpose('time', 'Alt_Grid', 'wavelength')
xr_data = self.apply_cf_conventions(xr_data)
if self.return_separate_flags:
return xr_data, xr.merge([index_flags, species_flags])
else:
return xr_data
def apply_cf_conventions(self, data):
attrs = {'time': {'standard_name': 'time'},
'Lat': {'standard_name': 'latitude',
'units': 'degrees_north'},
'Lon': {'standard_name': 'longitude',
'units': 'degrees_east'},
'Alt_Grid': {'units': 'km'},
'wavelength': {'units': 'nm',
'description': 'wavelength at which aerosol extinction is retrieved'},
'O3': {'standard_name': 'number_concentration_of_ozone_molecules_in_air',
'units': 'cm-3'},
'NO2': {'standard_name': 'number_concentration_of_nitrogen_dioxide_molecules_in_air',
'units': 'cm-3'},
'H2O': {'standard_name': 'number_concentration_of_water_vapor_in_air',
'units': 'cm-3'},
'Ext': {'standard_name': 'volume_extinction_coefficient_in_air_due_to_ambient_aerosol_particles',
'units': 'km-1'},
'O3_Err': {'standard_name': 'number_concentration_of_ozone_molecules_in_air_error',
'units': 'percent'},
'NO2_Err': {'standard_name': 'number_concentration_of_nitrogen_dioxide_molecules_in_air_error',
'units': 'percent'},
'H2O_Err': {'standard_name': 'number_concentration_of_water_vapor_in_air_error',
'units': 'percent'},
'Ext_Err': {'standard_name': 'volume_extinction_coefficient_in_air_due_to_ambient_aerosol_'
'particles_error',
'units': 'percent'},
'Duration': {'units': 'seconds',
'description': 'duration of the sunrise/sunset event'},
'Beta': {'units': 'degrees',
'description': 'angle between the satellite orbit plane and the sun'},
'Trop_Height': {'units': 'km'},
'Radius': {'units': 'microns'},
'SurfDen': {'units': 'microns2 cm-3'}}
for key in attrs.keys():
data[key].attrs = attrs[key]
data.attrs = {'description': 'Retrieved vertical profiles of aerosol extinction, ozone, '
'nitrogen dioxide, water vapor, and meteorological profiles from SAGE II '
'version 7.00',
'publication reference': '<NAME>., <NAME>., <NAME>., & <NAME>. (2013). '
'SAGE version 7.0 algorithm: application to SAGE II. Atmospheric '
'Measurement Techniques, 6(12), 3539-3561.',
'title': 'SAGE II version 7.00',
'date_created': pd.Timestamp.now().strftime('%B %d %Y'),
'source_code': 'repository: https://github.com/LandonRieger/pySAGE.git, revision: '
+ pysagereader.__version__,
'source_data': 'https://eosweb.larc.nasa.gov/project/sage2/sage2_v7_table',
'version': pysagereader.__version__,
'Conventions': 'CF-1.7'}
if self.cf_names:
names = {'Lat': 'latitude',
'Lon': 'longitude',
'Alt_Grid': 'altitude',
'Beta': 'beta_angle',
'Ext': 'aerosol_extinction',
'Ext_Err': 'aerosol_extinction_error',
'O3': 'ozone',
'O3_Err': 'ozone_error',
'NO2': 'no2',
'NO2_Err': 'no2_error',
'SurfDen': 'surface_area_density',
'SurfDen_Err': 'surface_area_density_error',
                     'Radius': 'effective_radius',
                     'Radius_Err': 'effective_radius_error',
'Density': 'air_density',
'Density_Err': 'air_density_error',
'Type_Sat': 'satellite_sunset',
'Type_Tan': 'local_sunset',
'Trop_Height': 'tropopause_altitude',
'Duration': 'event_duration'}
for key in names.keys():
try:
data.rename({key: names[key]}, inplace=True)
except ValueError:
pass
return data
@staticmethod
def convert_index_bit_flags(data: Dict) -> xr.Dataset:
"""
Convert the int32 index flags to a dataset of distinct flags
Parameters
----------
data
Dictionary of input data as returned by ``load_data``
Returns
-------
Dataset of the index bit flags
"""
flags = dict()
flags['pmc_present'] = 0
flags['h2o_zero_found'] = 1
flags['h2o_slow_convergence'] = 2
flags['h2o_ega_failure'] = 3
flags['default_nmc_temp_errors'] = 4
flags['ch2_aero_model_A'] = 5
flags['ch2_aero_model_B'] = 6
flags['ch2_new_wavelength'] = 7
flags['incomplete_nmc_data'] = 8
flags['mirror_model'] = 15
flags['twomey_non_conv_rayleigh'] = 19
flags['twomey_non_conv_386_Aero'] = 20
flags['twomey_non_conv_452_Aero'] = 21
flags['twomey_non_conv_525_Aero'] = 22
flags['twomey_non_conv_1020_Aero'] = 23
flags['twomey_non_conv_NO2'] = 24
flags['twomey_non_conv_ozone'] = 25
flags['no_shock_correction'] = 30
f = dict()
for key in flags.keys():
f[key] = (data['InfVec'] & 2 ** flags[key]) > 0
xr_data = []
time = pd.to_timedelta(data['mjd'], 'D') + pd.Timestamp('1858-11-17')
for key in f.keys():
xr_data.append(xr.DataArray(f[key], coords=[time], dims=['time'], name=key))
return xr.merge(xr_data)
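    # Added note (illustrative, not from the original file): each entry above is a bit
    # position in the packed int32 'InfVec', tested with a mask exactly as in the loop above:
    #   >>> inf_vec = np.int32(2 ** 4 + 2 ** 15)
    #   >>> bool(inf_vec & 2 ** 4), bool(inf_vec & 2 ** 15), bool(inf_vec & 2 ** 19)
    #   (True, True, False)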
@staticmethod
def convert_species_bit_flags(data: Dict) -> xr.Dataset:
"""
Convert the int32 species flags to a dataset of distinct flags
Parameters
----------
data
Dictionary of input data as returned by `load_data`
Returns
-------
Dataset of the index bit flags
"""
flags = dict()
flags['separation_method'] = [0, 1, 2]
flags['one_chan_aerosol_corr'] = 3
flags['no_935_aerosol_corr'] = 4
flags['Large_1020_OD'] = 5
flags['NO2_Extrap'] = 6
flags['Water_vapor_ratio'] = [7, 8, 9, 10]
flags['Cloud_Bit_1'] = 11
flags['Cloud_Bit_2'] = 12
flags['No_H2O_Corr'] = 13
flags['In_Troposphere'] = 14
separation_method = dict()
separation_method['no_aerosol_method'] = 0
separation_method['trans_no_aero_to_five_chan'] = 1
separation_method['standard_method'] = 2
separation_method['trans_five_chan_to_low'] = 3
separation_method['four_chan_method'] = 4
separation_method['trans_four_chan_to_three_chan'] = 5
separation_method['three_chan_method'] = 6
separation_method['extension_method'] = 7
f = dict()
for key in flags.keys():
if hasattr(flags[key], '__len__'):
if key == 'separation_method':
for k in separation_method.keys():
temp = data['ProfileInfVec'] & np.sum([2 ** k for k in flags[key]])
f[k] = temp == separation_method[k]
else:
temp = data['ProfileInfVec'] & np.sum([2 ** k for k in flags[key]])
f[key] = temp >> flags[key][0] # shift flag to save only significant bits
else:
f[key] = (data['ProfileInfVec'] & 2 ** flags[key]) > 0
xr_data = []
time = pd.to_timedelta(data['mjd'], 'D') + | pd.Timestamp('1858-11-17') | pandas.Timestamp |
import sys
import pandas as pd
import numpy as np
import json
import os
from datetime import date
from scipy.stats import linregress
import yaml
from momentum_data import cfg
DIR = os.path.dirname(os.path.realpath(__file__))
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.max_columns', None)
try:
with open('config.yaml', 'r') as stream:
config = yaml.safe_load(stream)
except FileNotFoundError:
config = None
except yaml.YAMLError as exc:
print(exc)
PRICE_DATA = os.path.join(DIR, "data", "price_history.json")
ACCOUNT_VALUE = cfg("CASH")
RISK_FACTOR_CFG = cfg("RISK_FACTOR")
RISK_FACTOR = RISK_FACTOR_CFG or 0.002
MAX_STOCKS = cfg("STOCKS_COUNT_OUTPUT")
SLOPE_DAYS = cfg("MOMENTUM_CALCULATION_PAST_DAYS")
POS_COUNT_TARGET = cfg("POSITIONS_COUNT_TARGET")
TITLE_RANK = "Rank"
TITLE_TICKER = "Ticker"
TITLE_SECTOR = "Sector"
TITLE_UNIVERSE = "Universe"
TITLE_MOMENTUM = "Momentum (%)"
TITLE_RISK = "ATR20d"
TITLE_PRICE = "Price"
TITLE_AMOUNT = "Shares"
TITLE_POS_SIZE = "Position ($)"
TITLE_SUM = "Sum ($)"
if not os.path.exists('output'):
os.makedirs('output')
def read_json(json_file):
with open(json_file, "r") as fp:
return json.load(fp)
def momentum(closes):
"""Calculates slope of exp. regression normalized by rsquared"""
returns = np.log(closes)
indices = np.arange(len(returns))
slope, _, r, _, _ = linregress(indices, returns)
# return ((1 + slope) ** 253) * (r**2)
return (((np.exp(slope) ** 252) - 1) * 100) * (r**2)
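# Added illustrative sketch (not part of the original script): momentum() fits a
# linear regression to log prices, annualises the exponentiated slope and weights
# it by r**2, so a clean exponential uptrend scores far higher than a choppy one.
def _demo_momentum():
    smooth = [100 * 1.001 ** i for i in range(90)]       # steady ~0.1%/day uptrend
    choppy = [100 + 5 * ((-1) ** i) for i in range(90)]  # oscillation, no trend
    return momentum(smooth), momentum(choppy)  # first value is much larger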
def atr_20(candles):
"""Calculates last 20d ATR"""
daily_atrs = []
for idx, candle in enumerate(candles):
high = candle["high"]
low = candle["low"]
prev_close = 0
if idx > 0:
prev_close = candles[idx - 1]["close"]
daily_atr = max(high-low, np.abs(high - prev_close), np.abs(low - prev_close))
daily_atrs.append(daily_atr)
return pd.Series(daily_atrs).rolling(20).mean().tail(1).item()
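# Added illustrative sketch (not part of the original script): atr_20() expects a
# list of candle dicts with "high", "low" and "close" keys.
def _demo_atr_20():
    candles = [{"high": 102.0 + i, "low": 98.0 + i, "close": 100.0 + i} for i in range(25)]
    return atr_20(candles)  # constant 4-point true range -> 4.0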
def calc_stocks_amount(account_value, risk_factor, risk_input):
return (np.floor(account_value * risk_factor / risk_input)).astype(int)
def calc_pos_size(amount, price):
return np.round(amount * price, 2)
def calc_sums(account_value, pos_size):
sums = []
    total = 0
    stocks_count = 0
    for position in list(pos_size):
        total = total + position
        sums.append(total)
        if total < account_value:
            stocks_count = stocks_count + 1
return (sums, stocks_count)
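# Added illustrative sketch (not part of the original script, values are made up):
# each position risks account_value * risk_factor per unit of ATR, and calc_sums()
# reports how many of the resulting positions fit into the account.
def _demo_position_sizing():
    account_value, risk_factor = 10000.0, 0.002
    atrs = pd.Series([0.5, 1.0, 2.0])
    prices = pd.Series([20.0, 50.0, 80.0])
    amounts = calc_stocks_amount(account_value, risk_factor, atrs)  # [40, 20, 10]
    pos_size = calc_pos_size(amounts, prices)                       # [800.0, 1000.0, 800.0]
    sums, stocks_count = calc_sums(account_value, pos_size)
    return amounts, pos_size, sums, stocks_count                    # stocks_count == 3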
def positions():
"""Returns a dataframe doubly sorted by deciles and momentum factor, with atr and position size"""
json = read_json(PRICE_DATA)
momentums = {}
ranks = []
for ticker in json:
try:
closes = list(map(lambda candle: candle["close"], json[ticker]["candles"]))
if closes:
# calculate gaps of the last 90 days
diffs = np.abs(pd.Series(closes[-SLOPE_DAYS[0]:]).pct_change().diff()).dropna()
gaps = diffs[diffs > 0.15]
ma = pd.Series(closes).rolling(100).mean().tail(1).item()
if ma > closes[-1]:
print("%s is below it's 100d moving average." % ticker)
elif len(gaps):
print("%s has a gap > 15%%" % ticker)
else:
ranks.append(len(ranks)+1)
for slope_days in SLOPE_DAYS:
if not slope_days in momentums:
momentums[slope_days] = []
mmntm = momentum( | pd.Series(closes[-slope_days:]) | pandas.Series |
# Try out Naive Bayes on Kaggle's SMS Spam Collection Dataset
# Code: https://qiita.com/fujin/items/50fe0e0227ef8457a473
import matplotlib.pyplot as pyplot
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import BernoulliNB
# - Data preparation -
# Load the CSV
df = | pd.read_csv("./datasets/spam.csv", encoding="latin-1") | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from src.tasks.preprocessing_funcs import load_dataloaders
from src.tasks.trainer import train_and_fit
from src.tasks.infer import infer_from_trained
import logging
from argparse import ArgumentParser
from src.tasks.visualization import Graph
from src.tasks.pdf_to_txt import PdfToTxt
from src.tasks.merge_relations import MergeRelation
from src.tasks.graph_visualization import GraphVisualization
from src.tasks.get_training_data import get_candidates_for_train
import glob,os
import pandas as pd
'''
This fine-tunes the BERT model on SemEval task
'''
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger('__file__')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--train_data", type=str, default='/home/ying/PycharmProjects/biobert/datasets/RE/three_relations/train.tsv', \
help="training data .txt file path")
parser.add_argument("--test_data", type=str, default='/home/ying/PycharmProjects/biobert/datasets/RE/three_relations/test.tsv', \
help="test data .txt file path")
parser.add_argument("--use_pretrained_blanks", type=int, default=0, help="0: Don't use pre-trained blanks model, 1: use pre-trained blanks model")
parser.add_argument("--num_classes", type=int, default=3, help='number of relation classes')
parser.add_argument("--batch_size", type=int, default=8, help="Training batch size")
parser.add_argument("--gradient_acc_steps", type=int, default=1, help="No. of steps of gradient accumulation")
parser.add_argument("--max_norm", type=float, default=1.0, help="Clipped gradient norm")
parser.add_argument("--fp16", type=int, default=0, help="1: use mixed precision ; 0: use floating point 32") # mixed precision doesn't seem to train well
parser.add_argument("--num_epochs", type=int, default=10, help="No of epochs")
parser.add_argument("--lr", type=float, default=2*0.00005, help="learning rate")
parser.add_argument("--model_no", type=int, default=2, help='''Model ID: 0 - BERT\n
1 - ALBERT''')
parser.add_argument("--train", type=int, default=1, help="0: Don't train, 1: train")
parser.add_argument("--infer", type=int, default=1, help="0: Don't infer, 1: Infer")
parser.add_argument("--freeze", type=int, default=0, help='''1: Freeze most layers until classifier layers\
\n0: Don\'t freeze \
(Probably best not to freeze if GPU memory is sufficient)''')
args = parser.parse_args()
if args.train == 1:
net = train_and_fit(args)
print("aaaa")
if args.infer == 1:
inferer = infer_from_trained(args, detect_entities=True)
test2 = "exudry ® , omniderm ® , vigilon ® , duoderm ® , mepitel ® ) may aid in healing and reduce pain ."
pred = inferer.infer_sentence(test2, detect_entities=True)
print(pred)
test3 = "Duplication of this publication or parts thereof is permitted only under the provisions of the Copyright Law of the Publisher’s location, in its current version, and permission for use must always be obtained from Springer."
pred = inferer.infer_sentence(test3, detect_entities=True)
print(pred)
test4 = "moreover , uft has proved to be effective for inoperable advanced malignancies such as colorectal cancer, especially in combination with leucovorin or cisplatin."
pred = inferer.infer_sentence(test4, detect_entities=True)
print(pred)
test4 = "cancer can not be treated by dienogest."
pred = inferer.infer_sentence(test4, detect_entities=True)
print(pred)
test5 = "In more severe cases with high fever or marked prostration , hospitalization may be needed with IV acyclovir , antibiotics , fluids , and pain medications ."
pred = inferer.infer_sentence(test5, detect_entities=True)
print(pred)
test6 ="GIANT CELL TUMOR OF THE TENDON SHEATH A giant cell tumor of the tendon sheath is the most common tumor of the hand and presents with a firm enlarging nodule on the fingers ."
pred = inferer.infer_sentence(test6, detect_entities=True)
print(pred)
test7 = "topical corticosteroids may improve the dermatitis, and chronic administration of oral acyclovir is appropriate for patients with eh "
pred = inferer.infer_sentence(test7, detect_entities=True)
print(pred)
#
# # # sents_for_pretraining =[]
merged = MergeRelation()
for file in glob.glob('data/PDF_FOR_PARSE/*.pdf'):
csv_file = file.replace('.pdf', '.csv')
if os.path.exists(csv_file):
df = pd.read_csv(csv_file)
merged.get_df(df)
else:
file_name = os.path.basename(file)
sents_path = file.replace('.pdf', '_sents.csv')
graph = Graph(file_name)
input_sents = []
if os.path.exists(sents_path):
df_sents = pd.read_csv(sents_path,index_col=0)
for idx, row in df_sents.iterrows():
input_sents.append(row[0])
else:
Pdf2txt = PdfToTxt(file,is_pdf=True)
input_sents = Pdf2txt.get_processed_sents()
sents_df = | pd.DataFrame(input_sents) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import covsirphy as cs
def md(scenario, filename, name=None):
with open(filename, "w") as fh:
fh.write(scenario.summary(name=name).to_markdown())
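# Added illustrative sketch (not part of the original example): the same
# DataFrame-to-markdown pattern used by md() and by the "opt.md" block below
# (to_markdown() needs the optional 'tabulate' dependency installed).
def _demo_markdown_table():
    df = pd.DataFrame({"0th": [0.10, 0.12], "1st": [0.08, 0.09]},
                      index=["01Mar2020", "12Apr2020"])
    return df.to_markdown()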
def main():
print(cs.__version__)
# Data loading
data_loader = cs.DataLoader("input")
jhu_data = data_loader.jhu(verbose=True)
population_data = data_loader.population(verbose=True)
# For Japan
japan_data = data_loader.japan()
jhu_data.replace(japan_data)
print(japan_data.citation)
# Records
snl = cs.Scenario(jhu_data, population_data, country="Japan")
snl.records(filename="records.jpg")
# S-R trend analysis
snl.trend(filename="trend.jpg")
md(snl, "trend.md", "Main")
# Disable
snl.clear(name="A")
snl.disable(phases=["0th", "3rd"], name="A")
md(snl, "A.md", "A")
snl.enable(phases=["0th"], name="A")
md(snl, "A2.md", "A")
# Combine
snl.clear(name="B")
snl.combine(phases=["1st", "5th"], name="B")
md(snl, "B.md", "B")
# Delete 0th phase
snl.clear(name="C")
snl.delete(phases=["0th"], name="C")
md(snl, "C.md", "C")
snl.enable(phases=["0th"], name="C")
md(snl, "C2.md", "C2")
# Delete 3rd phase
snl.clear(name="D")
snl.delete(phases=["3rd"], name="D")
md(snl, "D.md", "D")
# Delete last phase
snl.clear(name="E")
snl.delete(phases=["6th", "7th"], name="E")
md(snl, "E.md", "E")
# Add phase with end_date
snl.add(end_date="31Aug2020", name="E")
md(snl, "E2.md", "E")
# Add phase with days
snl.add(days=10, name="E")
md(snl, "E3.md", "E")
# Add phase with end_date=None and days=None
snl.add(name="E")
md(snl, "E4.md", "E")
# Separate
snl.clear(name="F", template="E")
snl.separate(date="01Apr2020", name="F")
md(snl, "F.md", "F")
# Change
snl.clear(name="G", template="F")
snl.combine(phases=["0th", "1st"], name="G")
snl.separate(date="12Apr2020", name="G")
md(snl, "G.md", "G")
# Optimize change point
candidates = ["01Mar2020", "12Apr2020"]
opt_dict = {date: {} for date in candidates}
snl_opt = cs.Scenario(jhu_data, population_data, country="Japan", tau=720)
snl_opt.trend(show_figure=False)
for date in candidates:
snl_opt.clear(name=date)
snl_opt.combine(phases=["0th", "1st"], name=date)
snl_opt.separate(date=date, name=date)
snl_opt.estimate(cs.SIRF, phases=["0th", "1st"], name=date)
opt_dict[date]["0th"] = snl_opt.get("RMSLE", phase="0th", name=date)
opt_dict[date]["1st"] = snl_opt.get("RMSLE", phase="1st", name=date)
with open("opt.md", "w") as fh:
df = | pd.DataFrame.from_dict(opt_dict, orient="index") | pandas.DataFrame.from_dict |
import re
import pandas as pd
import numpy as np
class Resampler(object):
"""Resamples time-series data from one frequency to another frequency.
"""
min_in_freqs = {
'MIN': 1,
'MINUTE': 1,
'DAILY': 1440,
'D': 1440,
'HOURLY': 60,
'HOUR': 60,
'H': 60,
'MONTHLY': 43200,
'M': 43200,
'YEARLY': 525600
}
def __init__(self, data, freq, how='mean', verbosity=1):
"""
Arguments:
data : data to use
freq : frequency at which to transform/resample
how : string or dictionary mapping to columns in data defining how to resample the data.
"""
data = pd.DataFrame(data)
self.orig_df = data.copy()
self.target_freq = self.freq_in_mins_from_string(freq)
self.how = self.check_how(how)
self.verbosity = verbosity
def __call__(self, *args, **kwargs):
if self.target_freq > self.orig_freq:
# we want to calculate at higher/larger time-step
return self.downsample()
else:
# we want to calculate at smaller time-step
            return self.upsample()
@property
def orig_freq(self):
return self.freq_in_mins_from_string(pd.infer_freq(self.orig_df.index))
@property
def allowed_freqs(self):
return self.min_in_freqs.keys()
def check_how(self, how):
if not isinstance(how, str):
assert isinstance(how, dict)
assert len(how) == len(self.orig_df.columns)
else:
assert isinstance(how, str)
how = {col:how for col in self.orig_df.columns}
return how
def downsample(self):
df = pd.DataFrame()
for col in self.orig_df:
_df = downsample_df(self.orig_df[col], how=self.how[col], target_freq=self.target_freq)
df = pd.concat([df, _df], axis=1)
return df
    def upsample(self, drop_nan=True):
df = pd.DataFrame()
for col in self.orig_df:
_df = upsample_df(self.orig_df[col], how=self.how[col], target_freq=self.target_freq)
df = pd.concat([df, _df], axis=1)
# concatenation of dataframes where one sample was upsampled with linear and the other with same, will result
# in different length and thus concatenation will add NaNs to the smaller column.
if drop_nan:
df = df.dropna()
return df
def str_to_mins(self, input_string: str) -> int:
return self.min_in_freqs[input_string]
def freq_in_mins_from_string(self, input_string: str) -> int:
if has_numbers(input_string):
in_minutes = split_freq(input_string)
elif input_string.upper() in ['D', 'H', 'M', 'DAILY', 'HOURLY', 'MONTHLY', 'YEARLY', 'MIN', 'MINUTE']:
in_minutes = self.str_to_mins(input_string.upper())
else:
raise TypeError("invalid input string", input_string)
return int(in_minutes)
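# Added usage sketch (not part of the original module): hourly data downsampled to
# daily, averaging temperature-like columns and summing rain-like ones.
def _demo_resampler():
    idx = pd.date_range('2020-01-01', periods=48, freq='H')
    df = pd.DataFrame({'temp': np.linspace(0., 10., 48), 'rain': 1.0}, index=idx)
    return Resampler(df, freq='D', how={'temp': 'mean', 'rain': 'sum'}, verbosity=0)()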
def downsample_df(df, how, target_freq):
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
assert how in ['mean', 'sum']
# from low timestep to high timestep i.e from 1 hour to 24 hour
# For quantities like temprature, relative humidity, Q, wind speed
if how == 'mean':
return df.resample(f'{target_freq}min').mean()
# For quantities like 'rain', solar radiation', evapotranspiration'
elif how == 'sum':
return df.resample(f'{target_freq}min').sum()
def upsample_df(df, how:str, target_freq:int):
"""drop_nan: if how='linear', we may """
# from larger timestep to smaller timestep, such as from daily to hourly
out_freq = str(target_freq) + 'min'
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
col_name = df.columns[0]
nan_idx = df.isna() # preserving indices with nan values
assert df.shape[1] <=1
nan_idx_r = nan_idx.resample(out_freq).ffill()
nan_idx_r = nan_idx_r.fillna(False) # the first value was being filled with NaN, idk y?
data_frame = df.copy()
    # For quantities like temperature, relative humidity, Q, wind speed, we would like to do an interpolation
if how == 'linear':
data_frame = data_frame.resample(out_freq).interpolate(method='linear')
# filling those interpolated values with NaNs which were NaN before interpolation
data_frame[nan_idx_r] = np.nan
# For quantities like 'rain', solar radiation', evapotranspiration', we would like to distribute them equally
# at smaller time-steps.
elif how == 'same':
# distribute rainfall equally to smaller time steps. like hourly 17.4 will be 1.74 at 6 min resolution
idx = data_frame.index[-1] + get_offset(data_frame.index.freqstr)
data_frame = data_frame.append(data_frame.iloc[[-1]].rename({data_frame.index[-1]: idx}))
data_frame = add_freq(data_frame)
df1 = data_frame.resample(out_freq).ffill().iloc[:-1]
df1[col_name ] /= df1.resample(data_frame.index.freqstr)[col_name ].transform('size')
data_frame = df1.copy()
# filling those interpolated values with NaNs which were NaN before interpolation
data_frame[nan_idx_r] = np.nan
else:
raise ValueError(f"unoknown method to transform '{how}'")
return data_frame
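# Added illustrative sketch (not part of the original module): daily values
# interpolated onto an hourly grid via the 'linear' branch above.
def _demo_upsample_linear():
    s = pd.Series([0.0, 24.0, 48.0],
                  index=pd.date_range('2020-01-01', periods=3, freq='D'))
    return upsample_df(s, how='linear', target_freq=60)  # hourly steps of 1.0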
def add_freq(df, assert_feq=False, freq=None, method=None):
idx = df.index.copy()
if idx.freq is None:
_freq = | pd.infer_freq(idx) | pandas.infer_freq |
import re
import time
import argparse
import numpy as np
import pandas as pd
import util as ut
from collections import Counter
from collections import defaultdict
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
# public
def retrieve_chunk(df, max_size=5000000, chunk_number=0):
if chunk_number == -1:
return df
for i in range(2, 50):
ut.out('splitting into %d chunks...' % i)
dfs = np.array_split(df, i)
if len(dfs[0]) <= max_size:
ut.out('return chunk %d...' % chunk_number)
return dfs[chunk_number]
return df
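# Added illustrative sketch (not part of the original script): retrieve_chunk() keeps
# splitting the frame into more pieces until each piece fits under max_size rows.
def _demo_retrieve_chunk():
    df = pd.DataFrame({'text_id': range(10)})
    return retrieve_chunk(df, max_size=4, chunk_number=1)  # rows 4..6 of the 3-way split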
def retrieve_max_id(in_dir='', chunk_number=0, info_type='text'):
in_col = info_type + '_id'
max_id = 0
if chunk_number > 0:
fname = in_dir + str(chunk_number - 1) + '_' + info_type + '_sim.csv'
df = | pd.read_csv(fname) | pandas.read_csv |
import time
import sorting
import bst
import timeit
import platform
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import heapq
import copy
random.seed(521)
DEFAULT_NUMBER = 100000 # 100k
DEFAULT_POPULATION = range(100000) # 1m
DEFAULT_SIZES = [10, 100, 1000, 10000, 100000]
def _statistify(array):
"""returns mean and confidence interval of a given array of numbers (assuming normality of the sampling
population) """
mean, se = np.mean(array), 2 * (np.std(array) / np.sqrt(len(array)))
return mean, se
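# Added illustrative sketch (not part of the original benchmark): mean and 2*SE
# half-width for a handful of fake timings.
def _demo_statistify():
    return _statistify([0.010, 0.012, 0.011, 0.013, 0.009])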
class TimeTest(object):
"""returns an object which tests the time efficiency of a multi-dimensional array of increasing sizes of random
integers over functions of sorting, binary tree insertions and deletions and heap insertions/get/removal.
There's the possibility to get a summary of the tests results with CPU information, OS infos and plot of
array size/time of all the executed tests.
Csv file writer of the tests results for statistical purposes has been implemented too.
"""
def __init__(self, array=None, max_val=DEFAULT_NUMBER):
# this is the dict of dicts where the values obtained from the test_it func will be stored.
self.test_result = dict(quick_sort={}, merge_sort={}, binary_insertion={},
binary_insertion_se={}, binary_get_random={}, binary_get_random_se={},
binary_delete={}, binary_delete_se={}, binary_get_max={}, binary_get_max_se={},
heap_insert={}, heap_insert_se={}, heap_get_max={}, heap_get_max_se={},
heap_remove_se={}, heap_remove={})
# dict of all the list to use for testing
self.array_pool = {}
        # both for summary() purposes
self.cpu = platform.processor()
self.os = platform.platform()
self.hashcode = random._sha512().hexdigest()[0:10]
if array is None:
# we generate the arrays of random numbers with a logarithmic distance one with the other
for i in np.logspace(1.0, np.log10(max_val, dtype=float), base=10.0, endpoint=True, dtype=int):
self.array_pool[i] = random.sample(DEFAULT_POPULATION, k=i)
# to use a more accurate logarithmic scale, to improve plotting and statistics quality
elif array is "e_log":
for i in DEFAULT_SIZES:
self.array_pool[i] = random.sample(DEFAULT_POPULATION, k=i)
# optional
# else:
# for lst in array:
# self.array_pool[len(lst)] = lst
print("time test generated!")
def test_it(self):
"""generates a number of arrays of increasing size each of random integers and tests them over
the given functions. Eventually adds the results to the self.test_result dictionary with template:
{function: {size_array: time_mean}, function_se: {size_array: time_confint}}
"""
for key, arr in self.array_pool.items():
# sorting algorithms
self._test_it_quick_sort(key, arr)
self._test_it_merge_sort(key, arr)
print("sorting timing done!")
# BSTs implementation
insertion_counter = 0
global tree
tree = bst.BinarySearchTree()
for bst_key in self.array_pool[DEFAULT_NUMBER]:
insertion_counter += 1
if insertion_counter in self.array_pool.keys():
self._test_it_binary_get_max(insertion_counter)
self._test_it_binary_get_random(insertion_counter)
                # testing over 50 random numbers AND NOT CHANGING THE TREE
self._test_it_binary_insertion_deletion(insertion_counter,
random.sample(self.array_pool[DEFAULT_NUMBER], k=50))
tree.put(bst_key, 0)
else:
# KEY, VALUE
tree.put(bst_key, 0)
print("binary insertion, get random, get max and deletion timing done!")
# heap implementation
global heaper
heaper = []
heap_counter = 0
for heap_key in self.array_pool[DEFAULT_NUMBER]:
heap_counter += 1
if heap_counter in self.array_pool.keys():
# they will add values to heap_temp
self._test_it_heap_get_max(heap_counter)
# NOT CHANGING THE HEAP
self._test_it_heap_insert_delete(heap_counter, random.sample(self.array_pool[DEFAULT_NUMBER], k=50))
heapq.heappush(heaper, heap_key)
else:
heapq.heappush(heaper, heap_key)
print("heap insertion, get max and deletion timing done!")
self._pandator()
def csv(self, name='common'):
"""generates a csv file with the results of the test_it function, returns a "Run test_it before requesting
csv report" if the self.test_result field has not been populated already.
"""
if name is 'common':
name = 'benchmark_analysis_' + self.hashcode + ".csv"
with open(name, 'w') as f:
f.write(self.test_result.to_csv())
print("csv file written! check current directory")
def summary(self, pdf_report=True):
"""prints out a summary with CPU, OS infos, plots of all the tests [array_size/time]
and POSSIBLY a pdf markdown with all of it in a beautiful graphics.
Returns a "Run test_it before requesting a test summary" if the self.test_result
field has not been populated already.
"""
# TODO needs fix with errorscatter
plt.figure(figsize=(8, 11))
plt.suptitle("Benchmark analysis summary", fontsize=24)
plt.subplots_adjust(top=0.88)
# sorting repr
plt.subplot(321)
plt.tight_layout()
plt.title("Quick sort")
plt.grid()
plt.xscale('log')
plt.plot(self.test_result.quick_sort, marker='.', ms=0.99)
plt.ylabel("time")
plt.xlabel("$\log(size)$")
plt.subplot(322)
plt.tight_layout()
plt.grid()
plt.title("Merge Sort")
plt.plot(self.test_result.merge_sort, marker='.', ms=0.99)
plt.ylabel("time")
plt.xlabel("$\log(size)$")
plt.xscale('log')
# binary tree repr
plt.subplot(323)
plt.tight_layout()
plt.grid()
plt.errorbar(self.test_result.binary_delete.index, self.test_result.binary_delete,
self.test_result.binary_delete_se, label="Delete", ecolor='orange',
fmt='b-o', ms=0.99, capsize=2)
plt.errorbar(self.test_result.binary_insertion.index, self.test_result.binary_insertion,
self.test_result.binary_insertion_se, label="Insert",
ecolor='green', fmt='r-o', ms=0.99, capsize=2)
plt.yscale('log')
plt.ylabel("$\log(time)$")
plt.xlabel("$\log(size)$")
plt.xscale('log')
plt.title("Binary insert and binary delete")
plt.legend()
plt.subplot(324)
plt.tight_layout()
plt.grid()
plt.errorbar(self.test_result.binary_get_max.index, self.test_result.binary_get_max,
self.test_result.binary_get_max_se, label="Get max", ecolor='green', fmt='r-o', ms=0.99, capsize=2)
plt.errorbar(self.test_result.binary_get_random.index, self.test_result.binary_get_random,
self.test_result.binary_get_random_se, label="Get random", ecolor='orange', fmt='b-o', ms=0.99,
capsize=2)
plt.ylabel("$\log(time)$")
plt.xlabel("$\log(size)$")
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.title("Binary get max and binary get random")
# heap repr
plt.subplot(325)
plt.tight_layout()
plt.grid()
plt.errorbar(self.test_result.heap_insert.index, self.test_result.heap_insert, self.test_result.heap_insert_se,
label="Insertion", ecolor='green', fmt='r-o', ms=0.99, capsize=2)
plt.errorbar(self.test_result.heap_remove.index, self.test_result.heap_remove, self.test_result.heap_remove_se,
label="Deletion", ecolor='orange', fmt='b-o', ms=0.99, capsize=2)
plt.xscale('log')
plt.yscale('log')
plt.ylabel("$\log(time)$")
plt.xlabel("$\log(size)$")
plt.legend()
plt.title("Heap insertion, deletion")
plt.subplot(326)
plt.tight_layout()
plt.errorbar(self.test_result.index, self.test_result.heap_get_max, self.test_result.heap_get_max_se,
label="Get max", ecolor='red', ms=0.99, capsize=3)
plt.title("Heap get max")
plt.yscale('log')
plt.xscale('log')
plt.ylabel("$\log(time)$")
plt.xlabel("$\log(size)$")
plt.subplots_adjust(top=0.88)
# text with infos
self.print_info()
if pdf_report:
namefile = "benchmark_analysis_" + self.hashcode + ".pdf"
plt.savefig(fname=namefile, papertype='a4', orientation='portrait')
plt.show()
# sorting impl
def _test_it_quick_sort(self, key, arr):
self.test_result['quick_sort'][key] = timeit.timeit("sorting.quick_sort(" + str(arr) + ")", globals=globals(),
number=10)
def _test_it_merge_sort(self, key, arr):
self.test_result['merge_sort'][key] = timeit.timeit("sorting.merge(" + str(arr) + ")", globals=globals(),
number=10)
# bst impl
def _test_it_binary_insertion_deletion(self, key, array):
results_ins, results_del = [], []
for value in array:
results_ins.append(timeit.timeit("tree.put(" + str(value) + ",0)", globals=globals(), number=1))
results_del.append(timeit.timeit("tree.delete(" + str(value) + ")", globals=globals(), number=1))
self.test_result['binary_insertion'][key], self.test_result['binary_insertion_se'][key] = _statistify(
results_ins)
self.test_result["binary_delete"][key], self.test_result["binary_delete_se"][key] = _statistify(results_del)
def _test_it_binary_get_max(self, key):
results = []
for _ in range(50):
results.append(timeit.timeit("tree.findMax()", globals=globals(), number=10))
self.test_result['binary_get_max'][key], self.test_result['binary_get_max_se'][key] = _statistify(results)
def _test_it_binary_get_random(self, key):
results = []
for _ in range(50):
rand_key = random.randint(0, len(tree))
results.append(timeit.timeit("tree.get(" + str(rand_key) + ")", globals=globals(), number=100))
self.test_result['binary_get_random'][key], self.test_result['binary_get_random_se'][key] = _statistify(results)
# heap impl
def _test_it_heap_insert_delete(self, key, array):
results_ins, results_del = [], []
setupline = 'copy_heap = copy.deepcopy(heaper)'
for value in array:
results_ins.append(
timeit.timeit('heapq.heappush(copy_heap,' + str(value) + ')', number=1, globals=globals(),
setup=setupline))
results_del.append(timeit.timeit('heapq.heappop(copy_heap)', number=1, globals=globals(), setup=setupline))
self.test_result['heap_insert'][key], self.test_result['heap_insert_se'][key] = _statistify(results_ins)
self.test_result['heap_remove'][key], self.test_result['heap_remove_se'][key] = _statistify(results_del)
# TODO repair this function
def _test_it_heap_get_max(self, key):
results = []
for _ in range(30):
results.append(timeit.timeit('heaper[0]', number=10, globals=globals()))
self.test_result['heap_get_max'][key], self.test_result['heap_get_max_se'][key] = _statistify(results)
# deprecated
def _test_it_heap_del_max(self, key):
self.test_result['heap_remove'][key] = timeit.timeit('heapq.heappop(heaper)', number=1, globals=globals())
# TODO fix this function
def _pandator(self):
"""transform the collected results from dict to pandas DataFrames, merging them together into a unique Df"""
# noinspection PyTypeChecker
self.test_result = | pd.DataFrame.from_dict(self.test_result) | pandas.DataFrame.from_dict |
r"""
Baseline Calculation
"""
# Standard Library imports
import argparse
import cartopy.crs as ccrs
import datetime
import h5py
import json
import matplotlib.colors
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import os
import pandas as pd
import re
import scipy.optimize
import warnings
import sys
import xarray as xr
# Third party imports
from collections import OrderedDict
# Semi-local imports
import name_qch4_couple.io
import name_qch4_couple.name
import name_qch4_couple.plot
import name_qch4_couple.plot_h2
import name_qch4_couple.region_EU
import name_qch4_couple.routines
import name_qch4_couple.util
# Local imports
import routines
import chem_co
# =============================================================================
# Settings
# =============================================================================
# Argument Parser
parser = argparse.ArgumentParser()
parser.add_argument("-site", required=True)
parser.add_argument("-species", required=True)
parser.add_argument("-year", required=True, type=int)
parser.add_argument("-window1", required=True, type=int)
parser.add_argument("-window2", required=True, type=int)
parser.add_argument("-force", required=True, type=int)
parser.add_argument("-odir", required=True)
args = parser.parse_args()
site = args.site
year = args.year
species = args.species
window1 = args.window1
window2 = args.window2
force_compute = bool(args.force)
odir = args.odir
#mode = 'all'
site_ref = 'mhd' # HDF reference key
p = 101325 # Pa
T = 288.15 # K
ofile_con = f'condition-{site}-{species}-{year}.nc'
ofile_fil = f'filtered-{site}-{species}-{year}.nc'
ofile_fin = f'baseline-{site}-{species}-{year}.nc'
long_names = OrderedDict()
locations = OrderedDict()
with open(f'inputs/baseline/{site}.json', 'r') as f:
st_info = json.load(f)
long_names[site] = st_info['long_name']
locations[site] = st_info['location']
site1 = site.replace('_', '-')
date_nodash = 'REPLACE'
if species == 'ch4':
#from chem_co import read_Q, read_obs, read_baseline
var_name="chi_CH4"
Dfile = (
f'inputs/baseline/footprints_mhd/'
f'{site1}_UKV_EUROPE_{date_nodash}.nc'
)
    Q2obs = 1.e9 / 16.043 # M_CH4 (g mol-1) - IUPAC
ylabel = u'$\chi$ CH$_{4}$ (nmol mol$^{-1}$)'
ylim = [1800., 2400.]
yticks = np.arange(1800., 2400., 50.)
    var_long_name = 'mole_fraction_of_methane'
var_units = 'nmol mol-1'
# =============================================================================
# Pre-processing
# =============================================================================
print(f'Initialising')
print(f' site = {site}')
print(f' year = {year}')
# Dates
dt1 = pd.to_timedelta(window1//2, 'H')
dt2 = pd.to_timedelta(window2//2, 'H')
dt_large = max([dt1, dt2]) * 3
dates_tHour, dates_tDay = (
pd.date_range(
pd.to_datetime(f'{year}') - dt_large,
pd.to_datetime(f'{int(year) + 1}') + dt_large,
        freq=freq,
closed='left'
)
for freq in ['1H', '1D']
)
dates_tMonth = pd.date_range(
(pd.to_datetime(f'{year}') - dt_large).strftime('%Y-%m'),
(pd.to_datetime(f'{int(year) + 1}') + dt_large).strftime('%Y-%m'),
freq='1MS',
#closed='left'
)
# Grid
grid_info = routines.define_grid()
dlat = grid_info['dlat']
nlat = grid_info['nlat']
nlon = grid_info['nlon']
area = grid_info['area']
grid_centre = grid_info['grid_centre']
grid_vertex = grid_info['grid_vertex']
nlat_odd = bool(nlat % 2)
nlon_odd = bool(nlon % 2)
nlat_half = nlat // 2
nlon_half = nlon // 2
# Fooprints
#print('Read Footprints')
def read_D(Dfile):
h1 = 3.e3
h2 = 8.e3
with xr.open_dataset(Dfile) as ds_read:
with ds_read.load() as Din:
# time
nt = Din.time.size
# latlon
nlat = Din.lat.size
nlat_odd = bool(nlat % 2)
nlat_half = nlat // 2
nlon = Din.lon.size
nlon_odd = bool(nlon % 2)
nlon_half = nlon // 2
# height
lt = Din.height.values < h1
ut = (h1 <= Din.height.values) & (Din.height.values < h2)
st = h2 <= Din.height.values
# Footprint (lat, lon, time)
D = Din.fp.transpose('time', 'lat', 'lon').values
# End locations (height, lat/lon, time)
end = np.zeros((nt, 17))
endn = Din.particle_locations_n.transpose('time', 'height', 'lon'
).values
ends = Din.particle_locations_s.transpose('time', 'height', 'lon'
).values
ende = Din.particle_locations_e.transpose('time', 'height', 'lat'
).values
endw = Din.particle_locations_w.transpose('time', 'height', 'lat'
).values
end[:, 0] += endn[:, lt, -nlon_half:].sum((1, 2))
end[:, 1] += ende[:, lt, :+nlat_half].sum((1, 2))
end[:, 2] += ende[:, lt, -nlat_half:].sum((1, 2))
end[:, 3] += ends[:, lt, :+nlon_half].sum((1, 2))
end[:, 4] += ends[:, lt, -nlon_half:].sum((1, 2))
end[:, 5] += endw[:, lt, :+nlat_half].sum((1, 2))
end[:, 6] += endw[:, lt, -nlat_half:].sum((1, 2))
end[:, 7] += endn[:, lt, :+nlon_half].sum((1, 2))
end[:, 8] += endn[:, ut, -nlon_half:].sum((1, 2))
end[:, 9] += ende[:, ut, :+nlat_half].sum((1, 2))
end[:, 10] += ende[:, ut, -nlat_half:].sum((1, 2))
end[:, 11] += ends[:, ut, :+nlon_half].sum((1, 2))
end[:, 12] += ends[:, ut, -nlon_half:].sum((1, 2))
end[:, 13] += endw[:, ut, :+nlat_half].sum((1, 2))
end[:, 14] += endw[:, ut, -nlat_half:].sum((1, 2))
end[:, 15] += endn[:, ut, :+nlon_half].sum((1, 2))
end[:, 16] += (
endn[:, st].sum((1, 2))
+ ende[:, st].sum((1, 2))
+ ends[:, st].sum((1, 2))
+ endw[:, st].sum((1, 2))
)
if nlon_odd:
end[:, 0] += endn[:, lt, nlon_half].sum((1,)) / 2
end[:, 1] += ende[:, lt, nlat_half].sum((1,)) / 2
end[:, 2] += ende[:, lt, nlat_half].sum((1,)) / 2
end[:, 3] += ends[:, lt, nlon_half].sum((1,)) / 2
end[:, 4] += ends[:, lt, nlon_half].sum((1,)) / 2
end[:, 5] += endw[:, lt, nlat_half].sum((1,)) / 2
end[:, 6] += endw[:, lt, nlat_half].sum((1,)) / 2
end[:, 7] += endn[:, lt, nlon_half].sum((1,)) / 2
end[:, 8] += endn[:, ut, nlon_half].sum((1,)) / 2
end[:, 9] += ende[:, ut, nlat_half].sum((1,)) / 2
end[:, 10] += ende[:, ut, nlat_half].sum((1,)) / 2
end[:, 11] += ends[:, ut, nlon_half].sum((1,)) / 2
end[:, 12] += ends[:, ut, nlon_half].sum((1,)) / 2
end[:, 13] += endw[:, ut, nlat_half].sum((1,)) / 2
end[:, 14] += endw[:, ut, nlat_half].sum((1,)) / 2
end[:, 15] += endn[:, ut, nlon_half].sum((1,)) / 2
return D, end
# Observations
def r_decc(fpath):
odata = pd.read_csv(
fpath,
usecols=lambda x: x.lower() in ['time', 'ch4_ppb'],
index_col=['time'],
skipinitialspace=True,
parse_dates=['time']
).dropna()
# odata = odata.dropna()
odata.columns = odata.columns.str.lower()
return odata
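# Added illustrative sketch (hypothetical data, not part of the original script):
# r_decc() expects a 'time' column plus a mole-fraction column such as 'ch4_ppb'.
def _demo_r_decc_format():
    import io
    csv = "time,ch4_ppb\n2018-01-01 00:00,1900.1\n2018-01-01 01:00,1901.3\n"
    return pd.read_csv(io.StringIO(csv), index_col=['time'], parse_dates=['time'])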
def read_obs(timestamps, site, resample=False):
date = timestamps[0].strftime('%Y-%m')
t0 = timestamps[0].strftime('%Y-%m-%d %H')
t1 = timestamps[-1].strftime('%Y-%m-%d %H')
if site == 'WAO':
ifile = 'inputs/obs/WAO_H2_oct2021.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'MHD_10magl':
ifile = 'inputs/baseline/MHD_2018.csv'
col_or_no = 'ch4_ppb'
sigma_col_or_no = 0.2
else:
ifile = False
col_or_no = np.nan
sigma_col_or_no = np.nan
if ifile:
all_obs_raw = r_decc(ifile).sort_index().loc[t0:t1]
obs_raw = all_obs_raw[col_or_no]
sigma_obs_raw = (all_obs_raw[sigma_col_or_no]
if isinstance(sigma_col_or_no, str) else
pd.Series(sigma_col_or_no, index=all_obs_raw.index))
if isinstance(col_or_no, str):
obs = (obs_raw
if resample is False else
obs_raw.resample('1H').mean().reindex(timestamps))
else:
obs = pd.Series(col_or_no, index=timestamps)
if isinstance(sigma_col_or_no, str) or isinstance(col_or_no, str):
sigma_obs = (
sigma_obs_raw
if resample is False else
sigma_obs_raw.resample('1H').apply(
lambda x: np.sum(x**2)).reindex(timestamps))
else:
sigma_obs = pd.Series(sigma_col_or_no, index=timestamps)
return obs, sigma_obs
obs, sigma_obs = read_obs(dates_tHour, site)
# Filters - Footprint (should not exceed)
filter1 = {
k: np.zeros((nlat, nlon)) for k in ['f_local', 'f_europe', 'f_south']
}
## Local
radius = 2
st_idx = [np.abs(grid_centre[i] - l).argmin() for i, l in enumerate(locations[site])]
filter1['f_local'][
st_idx[1]-radius:st_idx[1]+radius+1,
st_idx[0]-radius:st_idx[0]+radius+1
] = 1.
## Europe
filter1['f_europe'][
np.load('europe.npy', 'r')
] = 1.
## Southern
lat_south = 30.
filter1['f_south'][grid_centre[1] + 0.5*dlat <= lat_south] = 1.
## Population
# filter1['f_pop'] =
## Thresholds
filter_threshold = {k: 1. for k in filter1}
filter_threshold['f_local'] = 10
filter_threshold['f_europe'] = 5
filter_threshold['f_south'] = 2
# Dilution sensitivity limit
#dsl = 3.4 * 1.e6 / p * 8.314 * T
dsl = 3.4 / p * 8.314 * T
# Filters - Domain borders (should exceed)
filter2 = {
'p_nw_all': [0, 5, 6, 7, 8, 13, 14, 15], # default
}
## Thresholds
filter_threshold['p_nw_all'] = 0.8
# =============================================================================
# Main Process
# =============================================================================
print("Processing Main Routine")
filters = list(filter1) + list(filter2)
flag_con = (ofile_con
and os.path.exists(os.path.join(odir, ofile_con))
and not force_compute)
if flag_con:
try:
with xr.open_dataset(os.path.join(odir, ofile_con)) as ds_read:
with ds_read.load() as ds:
if any([i not in ds.variables for i in filter_threshold]):
flag_con = False
except:
flag_con = False
# Get conditions
if flag_con:
print(" Using pre-existing filters")
    with xr.open_dataset(os.path.join(odir, ofile_con)) as ds_read:
with ds_read.load() as ds:
condition = pd.concat([
ds[i].to_series() for i in filter_threshold
], axis=1)
idx_H = condition.index
else:
print(" Applying filters")
idx_H = dates_tHour
condition = pd.DataFrame(0, index=idx_H, columns=filters)
for month in dates_tMonth:
print(f" {month}")
month_idx = month.strftime('%Y-%m')
try:
hoursinmonth = pd.date_range(
month,
month + pd.offsets.MonthBegin(1),
freq='1H',
closed='left'
)
Dfile_in = re.sub(date_nodash, month.strftime('%Y%m'), Dfile)
D, end_loc = (i[hoursinmonth.isin(dates_tHour)] for i in read_D(Dfile_in))
condition.loc[month_idx, filter1] = pd.DataFrame(
{k: (D * v).sum((1, 2)) for k, v in filter1.items()},
index=condition.loc[month_idx].index
)
condition.loc[month_idx, filter2] = pd.DataFrame(
{k: end_loc[:, v].sum((1,)) for k, v in filter2.items()},
index=condition.loc[month_idx].index
)
except:
condition.loc[month, filter1] = np.inf
condition.loc[month, filter2] = -np.inf
name_qch4_couple.io.w_nc(
dim={
'time': [
(
dates_tHour
- pd.to_datetime('1990-01')
).astype('timedelta64[s]'),
{'size': None},
{'datatype': np.float64, 'zlib': True},
{
'units': f'seconds since {pd.to_datetime("1990-01")}',
'long_name': 'time',
'calendar': 'gregorian',
}
],
},
var={
**{
k: [
condition[k].values,
{
'dimensions': ('time'), 'datatype': np.float64,
'zlib': True
},
{
'units': var_units,
'long_name': var_long_name
}
]
for k in filter_threshold
},
},
ofile=os.path.join(odir, ofile_con)
)
# Check if baseline condition
filtered = pd.DataFrame(0, index=idx_H, columns=filters)
for k in filter1:
threshold = filter_threshold[k]
filtered.loc[condition[k] / dsl <= threshold, k] = 1.
for k in filter2:
threshold = filter_threshold[k]
filtered.loc[condition[k] >= threshold, k] = 1.
# Baseline
filters_fin = ['f_local', 'f_europe', 'f_south', 'p_nw_all']
oidx = obs.index
nidx = oidx.floor('H')
obs_filter = filtered.reindex(nidx).reset_index(drop=True)
obs_filter.index = oidx
baseline0 = obs[obs_filter[filters_fin].min(1) == 1.]
baseline0_sigma = sigma_obs[obs_filter[filters_fin].min(1) == 1.]
#baseline0 = baseline0.loc[baseline0.notnull()]
#baseline0_sigma = baseline0_sigma.loc[baseline0.notnull()]
if not force_compute and os.path.exists(os.path.join(odir, ofile_fin)):
print(" Reading baselines")
with xr.open_dataset(os.path.join(odir, ofile_fin)) as ds_read:
with ds_read.load() as ds:
baseline2 = ds[var_name].to_series()
baseline2_var = ds[f'var_{var_name}'].to_series()
else:
print(" Processing baselines")
warnings.simplefilter('ignore', np.RankWarning)
check = pd.DataFrame(0., index=dates_tHour, columns=[])
meas_on_time = dates_tHour.intersection(baseline0.index)
window1bef = dates_tHour.to_series().apply(
lambda x: baseline0.loc[x - dt1//2 : x].size
)
window1aft = dates_tHour.to_series().apply(
lambda x: baseline0.loc[x : x + dt1//2].size
)
window1minhalfwidth = dates_tHour.to_series().apply(
lambda x: 2 if x in meas_on_time else 1
)
window1bef_more = window1minhalfwidth - window1bef
window1aft_more = window1minhalfwidth - window1aft
window1start = dates_tHour.to_series().apply(lambda x: (
x - dt1
if window1bef.loc[x] > window1minhalfwidth.loc[x] else
baseline0.loc[:x - dt1].iloc[-window1bef_more[x]:].first_valid_index()
if baseline0.loc[:x - dt1].iloc[-window1bef_more[x]:].size else
dates_tHour[0]
))
window1end = dates_tHour.to_series().apply(
lambda x: (
x + dt1
if window1aft.loc[x] > window1minhalfwidth.loc[x] else
baseline0.loc[x + dt1:].iloc[:window1aft_more[x]].last_valid_index()
if baseline0.loc[x + dt1:].iloc[:window1aft_more[x]].size else
dates_tHour[-1]
))
def fit_func(value, sigma, time):
nfront = value.loc[:time].size
nback = value.loc[time:].size
if value.size >= 2 and all([nfront, nback]):
order = min(value.size, 2)
t2H = (value.index - time).to_series() / pd.Timedelta(hours=1)
fit = scipy.optimize.curve_fit(
(
(lambda x, a, b, c: a + b*x + c*x**2)
if order == 2 else
(lambda x, a, b: a + b*x)
),
t2H.values, value.values,
sigma=sigma.values,
absolute_sigma=True,
method='trf'
)
intercept = fit[0][0]
variance = fit[1][0, 0] + np.var(value)
elif value.size:
intercept = value.values[0]
variance = sigma.values[0]
else:
intercept = np.nan
variance = np.nan
return pd.Series([intercept, variance], index=['intercept', 'variance'])
baseline1 = dates_tHour.to_series().apply(lambda x:
fit_func(baseline0.loc[window1start[x]:window1end[x]],
baseline0_sigma.loc[window1start[x]:window1end[x]],
x))
baseline1.interpolate('time', inplace=True, limit_direction='both')
baseline2 = baseline1['intercept'].rolling(
window2+1, center=True
).mean().interpolate('time', limit_direction='both')
baseline2_var = baseline1['variance'].rolling(
window2+1, center=True
).mean().interpolate('time', limit_direction='both')
name_qch4_couple.io.w_nc(
dim={
'time': [
(
dates_tHour[dates_tHour.year == year]
- pd.to_datetime('1990-01')
).astype('timedelta64[s]'),
{'size': None},
{'datatype': np.float64, 'zlib': True},
{
'units': f'seconds since { | pd.to_datetime("1990-01") | pandas.to_datetime |
# this module generates the train/validation/test data splits for the redundant and noisy contexts
import os
import numpy as np
import pandas as pd
from os.path import join
import random
from runs.experiments import Experiment
def generate_tr_vl_ts_splits(id,
source_path,
split_path,
n_tr=3800,
n_vl=872,
n_ts=1821):
""" Here we generate the train, validation,
and test split starting from the training set of the SST2 only.
The data are available at
https://github.com/CS287/HW1/tree/master/data
We save the three splits into three different files
:param id: the id of the experiment, not necessary for computation
:param source_path: where to retrieve the original splits
:param split_path: where to save the result
    :param n_tr: number of training examples
    :param n_vl: number of validation examples
:param n_ts: number of test examples
"""
train_sentences = pd.read_csv(join(source_path, 'train.csv'), index_col=0)
train_labels = np.load(join(source_path, 'train.npy'))
file_names = ['train', 'valid', 'test']
div = np.array([n_tr // 2, n_vl // 2, n_ts // 2])
splits = np.cumsum(div)
seed = 10
pos_id = np.argwhere(train_labels == 1).squeeze()
neg_id = np.argwhere(train_labels == 0).squeeze()
random.Random(seed).shuffle(pos_id)
random.Random(seed).shuffle(neg_id)
splits_pos = np.split(pos_id, splits) # here we split in the three dataset
splits_neg = np.split(neg_id, splits)
for p_, n_, name_ in zip(splits_pos, splits_neg, file_names):
df = | pd.concat([train_sentences.loc[p_], train_sentences.loc[n_]]) | pandas.concat |
import argparse
import json
import os
import re
from glob import glob
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style("dark")
def parse_args():
description = """Measure how you spend time by tracking key presses and
changes to window focus or title."""
p = argparse.ArgumentParser(description=description)
p.add_argument("-b", "--between", help="Times in 'HH:MM HH:MM' format.")
p.add_argument("-c", "--categories", help="Category dictionary (JSON).")
p.add_argument("-d", "--data", nargs="*", help="Data files to load.")
p.add_argument("-e", "--end", help="Date in YYYY-MM-DD format.")
p.add_argument("-s", "--start", help="Date in YYYY-MM-DD format.")
idle_help = "Max minutes between events before you're considered idle."
p.add_argument("-i", "--idle", help=idle_help, default=10, type=int)
freq_help = "Grouping frequency. See pandas freq docs for format."
p.add_argument("-f", "--freq", help=freq_help, default="60min")
return p.parse_args()
def load_df(paths=None, freq="60min"):
if not paths:
home = os.environ.get("HOME")
path = os.path.join(home, ".config", "dowut", "data", "data_*.csv")
paths = sorted(glob(path))
df = pd.concat(pd.read_csv(path, parse_dates=["time"]) for path in paths)
df.sort_values("time", inplace=True)
df.set_index("time", drop=False, inplace=True)
# If a duration straddles a grouping boundary (say we group hourly and the
# event lasted from from 5:59 to 6:01), we need one minute allocated to the
# 5:00 hour and one minute to the 6:00 hour. To make the calculations more
# straightforward, we insert fake events at the grouping boundaries.
l = df.time.min().floor(freq)
u = df.time.max().ceil(freq)
r = pd.date_range(l, u, freq=freq, closed="left")
s = pd.Series(r)
idx = pd.concat([s, df.time]).sort_values()
df = df.reindex(idx)
df["time"] = df.index
# Insert a fake event type at "boundary" times. It inherits all values
# except event_type from the previous row. This ensures the last event from
# the previous group crosses the boundary while still having its duration
# correctly allocated to both groups.
df["event_type"].fillna("Fake", inplace=True)
df.fillna(method="ffill", inplace=True)
df.fillna("Fake", inplace=True)
return df
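# Added illustrative sketch (not part of the original tool): the boundary trick used
# in load_df() above -- union the real event times with the grouping boundaries so a
# duration that straddles a boundary is split across both groups.
def _demo_boundary_reindex():
    events = pd.DataFrame({"time": pd.to_datetime(["2021-01-01 05:59", "2021-01-01 06:01"])})
    events.set_index("time", drop=False, inplace=True)
    bounds = pd.Series(pd.date_range("2021-01-01 05:00", "2021-01-01 07:00",
                                     freq="60min", closed="left"))
    idx = pd.concat([bounds, events.time]).sort_values()
    return events.reindex(idx)  # NaN rows mark the inserted hourly boundaries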
def load_categories(path):
if not path:
home = os.environ.get("HOME")
path = os.path.join(home, ".config", "dowut", "categories.json")
try:
with open(path) as f:
return json.load(f)
except:
return {}
def plot_active(df, ax, freq, title, normalize=False, max_idle=None):
df = df.copy()
delta = df.shift(-1).time - df.time
df["delta"] = delta.apply(lambda d: d.total_seconds() / 60)
if max_idle:
df["delta"] = df["delta"].apply(lambda d: d if d <= max_idle else 0)
grouper = | pd.Grouper(key="time", freq=freq) | pandas.Grouper |
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
def buildFeatureFrame(filename,timepoint):
temp = np.asarray(np.load(filename,allow_pickle=True)).item()
imfilename = temp['filename']
img = io.imread(imfilename);
masks = clear_border(temp['masks'])
image_props = measure.regionprops_table(masks,
intensity_image=img,
properties=('label','area','filled_area', 'centroid',
'eccentricity','mean_intensity'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
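# Illustrative sketch (not from the original module): what buildFeatureFrame
# computes, shown on a tiny synthetic image/mask pair instead of a saved .npy
# segmentation. The arrays below are invented for the example.
import numpy as np
import pandas as pd
from skimage import measure

_img = np.zeros((8, 8), dtype=np.uint8)
_img[1:4, 1:4] = 50                       # one bright square "cell"
_masks = np.zeros((8, 8), dtype=np.int32)
_masks[1:4, 1:4] = 1                      # its label mask
_props = measure.regionprops_table(
    _masks, intensity_image=_img,
    properties=('label', 'area', 'centroid', 'eccentricity', 'mean_intensity'))
_im_df = pd.DataFrame(_props)
_im_df['time'] = 0                        # timepoint column, as in buildFeatureFrame
print(_im_df)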
def generateCandidates(image1, image2, im1_select, dist_multiplier=2):
delX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'])**2)
max_dist = dist_multiplier*min(delX)
candidates = np.array(delX[delX < max_dist].index)
return(candidates)
def generateLinks(filename_t0, filename_t1,timepoint, nnDist = 10,costMax=35, mN_Int = 10, mN_Ecc=4, mN_Area=25, mN_Disp=1):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
arr = pd.DataFrame()
for i in np.array(ip0.index):
candidates = generateCandidates(ip0, ip1, i, dist_multiplier=nnDist)
canFRAME = pd.DataFrame(candidates)
canFRAME["1"] = i
arr = arr.append(canFRAME)
arr = arr.rename(columns={0: "t1", "1": "t0"})
arr = arr.reset_index(drop=True)
properties = pd.DataFrame()
mInt_0 = float(np.median(ip0.loc[:,['mean_intensity']]))
mInt_1 = float(np.median(ip1.loc[:,['mean_intensity']]))
for link in np.array(arr.index):
tmp_props_0 = (ip0.loc[arr.loc[link,["t0"]],:])
tmp_props_1 = (ip1.loc[arr.loc[link,["t1"]],:])
deltaInt = (np.abs((int(tmp_props_0["mean_intensity"])/mInt_0)-(int(tmp_props_1["mean_intensity"])/mInt_1))/
np.mean([(int(tmp_props_0["mean_intensity"])/mInt_0),(int(tmp_props_1["mean_intensity"])/mInt_1)]))
deltaArea = (np.abs(int(tmp_props_0['area']) - int(tmp_props_1['area']))/
np.mean([int(tmp_props_0["area"]),int(tmp_props_1["area"])]))
deltaEcc = np.absolute(float(tmp_props_0['eccentricity']) - float(tmp_props_1['eccentricity']))
deltaX = np.sqrt((int(tmp_props_0['centroid-0'])-int(tmp_props_1['centroid-0']))**2+
(int(tmp_props_0['centroid-1'])-int(tmp_props_1['centroid-1']))**2)
properties = properties.append(pd.DataFrame([int(tmp_props_0['label']),int(tmp_props_1['label']),
deltaInt ,deltaArea,deltaEcc,deltaX]).T)
properties = properties.rename(columns={0: "label_t0", 1: "label_t1", 2: "deltaInt",
3: "deltaArea", 4: "deltaEcc", 5: "deltaX"})
properties = properties.reset_index(drop=True)
properties["Cost"]=(properties.loc[:,"deltaInt"]*mN_Int)+(properties.loc[:,"deltaEcc"]*mN_Ecc)+(properties.loc[:,"deltaArea"]*mN_Area)+(properties.loc[:,"deltaX"]*mN_Disp)
properties["TransitionCapacity"]=1
properties = properties.loc[properties["Cost"]<costMax]
properties = properties.reset_index(drop=True)
return(properties)
def DivSimScore(daughterCell_1, daughterCell_2, FrameNext):
daughterStats_1 = FrameNext[(FrameNext['label'] == daughterCell_1)]
daughterStats_2 = FrameNext[(FrameNext['label'] == daughterCell_2)]
deltaInt = (np.abs((int(daughterStats_1["mean_intensity"]))-(int(daughterStats_2["mean_intensity"])))/
np.mean([(int(daughterStats_1["mean_intensity"])),(int(daughterStats_2["mean_intensity"]))]))
deltaArea = (np.abs(int(daughterStats_1['area']) - int(daughterStats_2['area']))/
np.mean([int(daughterStats_1["area"]),int(daughterStats_2["area"])]))
deltaEcc = np.absolute(float(daughterStats_1['eccentricity']) - float(daughterStats_2['eccentricity']))
deltaX = np.sqrt((int(daughterStats_1['centroid-0'])-int(daughterStats_2['centroid-0']))**2+
(int(daughterStats_1['centroid-1'])-int(daughterStats_2['centroid-1']))**2)
sims = pd.DataFrame([int(daughterCell_1),int(daughterCell_2),
deltaInt ,deltaArea,deltaEcc,deltaX]).T
sims = sims.rename(columns={0: "label_D1", 1: "label_D2", 2: "D2deltaInt",
3: "D2deltaArea", 4: "D2deltaEcc", 5: "D2deltaX"})
return(sims)
def DivSetupScore(motherCell, daughterCell_1, daughterCell_2, FrameCurr, FrameNext):
#determine similarities between mother and daughters
simDF = DivSimScore(daughterCell_1, daughterCell_2, FrameNext)
#determine relative area of mother compared to daughters
MotherArea = int(FrameCurr[(FrameCurr['label'] == motherCell)]['area'])
daughterArea_1 = int(FrameNext[(FrameNext['label'] == daughterCell_1)]['area'])
daughterArea_2 = int(FrameNext[(FrameNext['label'] == daughterCell_2)]['area'])
areaChange = MotherArea/(daughterArea_1 + daughterArea_2)
simDF["MDDeltaArea"] = areaChange
return(simDF)
def DivisionCanditates(propMtx, filename_t0,filename_t1,timepoint,mS_Area = 10, mS_Ecc = 2, mS_Int = 2, mS_Disp = 1, MDAR_thresh = 0.75, SDis_thresh = 20.0):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
Mothers = np.unique(propMtx.loc[:,['label_t0']])
DivCandidacy = pd.DataFrame()
for cell in Mothers:
DaughtersPossible = (propMtx[(propMtx['label_t0'] == cell)].loc[:,'label_t1'])
DaughtersPairs = np.array(np.meshgrid(DaughtersPossible, DaughtersPossible)).T.reshape(-1,2)
Sisters = np.unique(np.sort(DaughtersPairs),axis=0)
for pair in range(Sisters.shape[0]):
if (Sisters[pair,0] != Sisters[pair,1]):
tmpScoreSetup = (DivSetupScore(cell,Sisters[pair,0], Sisters[pair,1], ip0,ip1))
LogicMDAR = (tmpScoreSetup["MDDeltaArea"]>MDAR_thresh)
ScoreSDis = (mS_Int*tmpScoreSetup["D2deltaInt"]) + (mS_Area*tmpScoreSetup["D2deltaArea"]) + (mS_Ecc*tmpScoreSetup["D2deltaEcc"]) + (mS_Disp*tmpScoreSetup["D2deltaX"])
LogicSDis = (ScoreSDis<SDis_thresh)
tmpCandidacy = pd.DataFrame([cell,Sisters[pair,0],Sisters[pair,1],(LogicSDis&LogicMDAR).bool()]).T
DivCandidacy = DivCandidacy.append(tmpCandidacy)
DivCandidacy = DivCandidacy.rename(columns={0: "Mother", 1: "Daughter1", 2: "Daughter2",3: "Div"})
DivCandidacy = DivCandidacy.reset_index(drop=True)
# select true values
DivSelect = DivCandidacy[(DivCandidacy['Div'] == True)]
DivConnects_1 = DivSelect[['Mother','Daughter1','Div']]
DivConnects_2 = DivSelect[['Mother','Daughter2','Div']]
DivConnects_1 = DivConnects_1.rename(columns={'Mother': "label_t0", 'Daughter1': "label_t1"})
DivConnects_2 = DivConnects_2.rename(columns={'Mother': "label_t0", 'Daughter2': "label_t1"})
DivConnects = pd.concat([DivConnects_1,DivConnects_2])
DivConnects = DivConnects.reset_index(drop=True)
return(DivConnects)
def UpdateConnectionsDiv(propMtx,DivCandidatesMtx):
propMtx.loc[propMtx['label_t0'].isin(np.unique(DivCandidatesMtx['label_t0'])),['TransitionCapacity']] = 2
for div in range(DivCandidatesMtx.shape[0]):
tmp_prop = propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),]
old_score = float(tmp_prop.loc[:,'Cost'])
new_score = (old_score/2)
propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),'Cost'] = new_score
return(propMtx)
def SolveMinCostTable(filename_t0, filename_t1, DivisionTable,timepoint, OpeningCost = 30, ClosingCost = 30):
    # Rename the label columns so features from t0 and t1 can be distinguished after merging.
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip0 = ip0.rename(columns={"label" : "label_t0"})
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
ip1 = ip1.rename(columns={"label" : "label_t1"})
ip0["slabel_t0"] = np.array(range(ip0.label_t0.shape[0]))+1
i0max = np.max(np.asarray(ip0["slabel_t0"]))
ip1["slabel_t1"] = np.array(range(i0max,i0max+ip1.label_t1.shape[0]))+1
i1max = np.max(np.asarray(ip1["slabel_t1"]))
i0_translation = ip0[["label_t0","slabel_t0"]]
i1_translation = ip1[["label_t1","slabel_t1"]]
result_tmp = pd.merge(DivisionTable, i0_translation, on=['label_t0'])
result = pd.merge(result_tmp, i1_translation, on=['label_t1'])
result_shorthand = result[['slabel_t0','slabel_t1','Cost','TransitionCapacity']]
transNodes0 = np.array(result_shorthand['slabel_t0']) ;
transNodes1 = np.array(result_shorthand['slabel_t1']) ;
transCosts = np.array(result_shorthand['Cost']) ;
transCaps = np.repeat(1,transNodes0.size) ;
sourceNodes0 = np.repeat([0],i1max)
sourceNodes1 = np.array(range(i1max))+1
sourceCosts = np.concatenate((np.repeat(1,ip0.shape[0]),np.repeat(OpeningCost,ip1.shape[0])), axis=None)
    # Source capacities are dictated by which node could be splitting: capacity = 2 if that node had a division candidate.
tmpUnique0 = result_shorthand[["slabel_t0","TransitionCapacity"]].drop_duplicates()
HighCaps = tmpUnique0.loc[tmpUnique0["TransitionCapacity"]==2,]
LowCaps = pd.DataFrame(i0_translation).copy(deep=True)
LowCaps['Cap'] = 1
LowCaps.loc[LowCaps['slabel_t0'].isin(np.array(HighCaps['slabel_t0'])),'Cap'] = 2
sourceCaps = np.concatenate((np.array(LowCaps['Cap']),np.repeat(1,ip1.shape[0])), axis=None)
sinkNodes0 = np.array(range(i1max))+1
sinkNodes1 = np.repeat([i1max+1],i1max)
sinkCosts = np.concatenate((np.repeat(ClosingCost,ip0.shape[0]),np.repeat(1,ip1.shape[0])), axis=None)
sinkCaps = np.repeat(1,i1max)
# Define the directed graph for the flow.
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
start_nodes = np.concatenate((sourceNodes0, transNodes0, sinkNodes0)).tolist()
end_nodes = np.concatenate((sourceNodes1, transNodes1, sinkNodes1)).tolist()
capacities = np.concatenate((sourceCaps, transCaps, sinkCaps)).tolist()
costs = np.concatenate((sourceCosts, transCosts, sinkCosts)).tolist()
source = 0
sink = i1max+1
supply_amount = np.max([i0max,i1max-i0max])
supplies = np.concatenate(([supply_amount],np.repeat(0,i1max),[-1*supply_amount])).tolist()
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
# Add each arc.
for i in range(len(start_nodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],capacities[i], int(costs[i]))
# Add node supplies.
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
ArcFrame = pd.DataFrame()
    # Solve for the minimum cost flow between the source node (0) and the sink node.
if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:
print('Minimum cost:', min_cost_flow.OptimalCost())
for i in range(min_cost_flow.NumArcs()):
cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)
ArcFrame = ArcFrame.append(pd.DataFrame([min_cost_flow.Tail(i),
min_cost_flow.Head(i),
min_cost_flow.Flow(i),
min_cost_flow.Capacity(i),
cost]).T)
else:
print('There was an issue with the min cost flow input.')
ArcFrame = ArcFrame.rename(columns={0:'start',1:'end',2:"Flow",3:"Capacity",4:"Cost"})
#ArcFrame = ArcFrame.reset_index(drop=True)
FinalFrame = ArcFrame.loc[ArcFrame["Flow"]!=0,]
FinalFrame = FinalFrame.reset_index(drop=True)
return(FinalFrame)
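# Illustrative sketch (not from the original module): the same or-tools calls
# used in SolveMinCostTable, on a 3-node toy graph (source 0 -> node 1 -> sink 2)
# pushing a single unit of flow at unit cost. The graph is made up for the example.
from ortools.graph import pywrapgraph

_mcf = pywrapgraph.SimpleMinCostFlow()
_mcf.AddArcWithCapacityAndUnitCost(0, 1, 1, 1)   # tail, head, capacity, unit cost
_mcf.AddArcWithCapacityAndUnitCost(1, 2, 1, 1)
_mcf.SetNodeSupply(0, 1)                         # source supplies one unit
_mcf.SetNodeSupply(1, 0)
_mcf.SetNodeSupply(2, -1)                        # sink absorbs one unit
if _mcf.Solve() == _mcf.OPTIMAL:
    print('Minimum cost:', _mcf.OptimalCost())   # -> 2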
def ReviewCostTable(minCostFlowtable, timepoint, OpeningCost=30,ClosingCost=30):
sink = max(minCostFlowtable["end"])
Transitions = minCostFlowtable.loc[(minCostFlowtable["start"]!=0)&(minCostFlowtable["end"]!=sink),]
trans_start_nodes = np.unique(Transitions["start"])
trans_end_nodes = np.unique(Transitions["end"])
#find nodes that either appear (no start) or disappear (no end)
appearing = minCostFlowtable[(~minCostFlowtable.start.isin(trans_start_nodes))&
(~minCostFlowtable.end.isin(trans_start_nodes))&
(~minCostFlowtable.start.isin(trans_end_nodes))&
(~minCostFlowtable.end.isin(trans_end_nodes))]
appearing = appearing.loc[(appearing["Cost"] == OpeningCost)|(appearing["Cost"] == ClosingCost)]
appearing = appearing.reset_index(drop=True)
appearFrame = | pd.DataFrame() | pandas.DataFrame |
#Usage
# import sys
# sys.path.insert(0,'path to this file')
# import functions as f
import pickle
import pandas as pd
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, GlobalMaxPooling1D, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Attention,Concatenate
from keras.models import Model
from sklearn.metrics import roc_auc_score,roc_curve, auc
from numpy import random
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GlobalAveragePooling1D
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
import seaborn as sns
directory = '/content/drive/MyDrive/ML_Data/'
#Use this to create nD format input.
#For eg, to create 4D input, combine_AC(df,4)
def combine_AC(df,chunksize=3,seperate_chunks=False):
if not seperate_chunks:
df.Human = df.Human.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])
df.Yersinia = df.Yersinia.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])
try:
df.Joined = [df.loc[row]['Human']+df.loc[row]['Yersinia'] for row in range(df.shape[0])]
except:
df.Joined = df.Joined.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])
return df
#print("JHGVBJGHGHKHGKG")
df.Human = df.Human.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x)), chunksize)])
df.Yersinia = df.Yersinia.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x)), chunksize)])
df.Joined = [df.loc[row]['Human']+df.loc[row]['Yersinia'] for row in range(df.shape[0])]
return df
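# Illustrative sketch (not from the original module): the k-mer chunking that
# combine_AC applies to each sequence, shown on a plain string -- overlapping
# k-mers when seperate_chunks is False, disjoint chunks otherwise. The sequence
# below is made up.
_seq = "MKTAYIAK"
_k = 3
_overlapping = [_seq[i:i + _k] for i in range(len(_seq)) if len(_seq[i:i + _k]) >= _k]
_disjoint = [_seq[i:i + _k] for i in range(0, len(_seq), _k)]
print(_overlapping)  # ['MKT', 'KTA', 'TAY', 'AYI', 'YIA', 'IAK']
print(_disjoint)     # ['MKT', 'AYI', 'AK'] -- the trailing short chunk is kept here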
def shuff_together(df1,df2):
joined = pd.concat([df1,df2], axis=0)
joined = joined.iloc[np.random.permutation(len(joined))].reset_index(drop=True)
return joined.iloc[:df1.shape[0],:],joined.iloc[df1.shape[0]:,:].reset_index(drop=True)
def load_data(D=1,randomize=False):
try:
with open(directory+'df_train_'+str(D)+'D.pickle', 'rb') as handle:
df_train = pickle.load(handle)
except:
df_train = pd.read_pickle("C:/Users/nik00/py/proj/hyppi-train.pkl")
try:
with open(directory+'df_test_'+str(D)+'D.pickle', 'rb') as handle:
df_test = pickle.load(handle)
except:
df_test = | pd.read_pickle("C:/Users/nik00/py/proj/hyppi-independent.pkl") | pandas.read_pickle |
"""
The aim of this project was to build a classifier on the titanic kaggle dataset.
"""
### import libraries
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
# import data preprocessing modules
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelBinarizer
# import model selection modules
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# import classifier modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
# import model evaluation metrics modules
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
### load data
train_data = pd.read_csv("train.csv")
test_data = | pd.read_csv("test.csv") | pandas.read_csv |
#! /usr/bin/env python3
"""
Model Checker Collection for the Model Checking Contest.
"""
import argparse
import hashlib
import math
import logging
import os
import random
import statistics
# import getpass
import json
import pathlib
import pickle
import platform
import re
import sys
import tempfile
import tarfile
import pandas
import xmltodict
import docker
from mcc4mcc.analysis import known, learned, score_of, max_score, \
characteristics_of, REMOVE
from mcc4mcc.model import Values, Data, value_of, CHARACTERISTICS
VERDICTS = {
"ORDINARY": "Ordinary",
"SIMPLE_FREE_CHOICE": "Simple Free Choice",
"EXTENDED_FREE_CHOICE": "Extended Free Choice",
"STATE_MACHINE": "State Machine",
"MARKED_GRAPH": "Marked Graph",
"CONNECTED": "Connected",
"STRONGLY_CONNECTED": "Strongly Connected",
"SOURCE_PLACE": "Source Place",
"SINK_PLACE": "Sink Place",
"SOURCE_TRANSITION": "Source Transition",
"SINK_TRANSITION": "Sink Transition",
"LOOP_FREE": "Loop Free",
"CONSERVATIVE": "Conservative",
"SUBCONSERVATIVE": "Sub-Conservative",
"NESTED_UNITS": "Nested Units",
"SAFE": "Safe",
"DEADLOCK": "Deadlock",
"REVERSIBLE": "Reversible",
"QUASI_LIVE": "Quasi Live",
"LIVE": "Live",
}
RENAMING = {
"tapaalPAR": "tapaal",
"tapaalSEQ": "tapaal",
"tapaalEXP": "tapaal",
"sift": "tina",
"tedd": "tina",
}
TEMPORARY = None
def unarchive(filename):
"""
Extract the model from an archive.
"""
# pylint: disable=global-statement
global TEMPORARY
# pylint: enable=global-statement
while True:
if os.path.isfile(filename):
directory = tempfile.TemporaryDirectory()
TEMPORARY = directory
logging.info(
f"Extracting archive {filename} "
f"to temporary directory {directory.name}.")
with tarfile.open(name=filename) as tar:
tar.extractall(path=directory.name)
if platform.system() == "Darwin":
filename = "/private" + directory.name
else:
filename = directory.name
elif os.path.isdir(filename):
if os.path.isfile(filename + "/model.pnml"):
logging.info(
f"Using directory {filename} for input, "
f"as it contains a model.pnml file.")
break
else:
parts = os.listdir(filename)
if len(parts) == 1:
filename = filename + "/" + parts[0]
else:
logging.error(
f"Cannot use directory {filename} for input, "
f"as it does not contain a model.pnml file.")
return None
else:
logging.error(
f"Cannot use directory {filename} for input, "
f"as it does not contain a model.pnml file.")
return None
return filename
def read_boolean(filename):
"""Read a Boolean file from the MCC."""
with open(filename, "r") as boolfile:
what = boolfile.readline().strip()
return value_of(what)
def do_extract(arguments):
"""
Main function for the extract command.
"""
if arguments.exclude is None:
arguments.exclude = []
else:
arguments.exclude = sorted(arguments.exclude.split(","))
if arguments.forget is None:
arguments.forget = []
else:
arguments.forget = sorted(arguments.forget.split(","))
# Compute prefix for generated files:
hasher = hashlib.md5()
with open(arguments.characteristics, "rb") as hinput:
hasher.update(hinput.read())
characteristics_hash = hasher.hexdigest()
hasher = hashlib.md5()
with open(arguments.results, "rb") as hinput:
hasher.update(hinput.read())
results_hash = hasher.hexdigest()
as_json = json.dumps({
"characteristics": characteristics_hash,
"results": results_hash,
"duplicates": arguments.duplicates,
"exclude": arguments.exclude,
"forget": arguments.forget,
"training": arguments.training,
"year": arguments.year,
}, sort_keys=True)
hasher = hashlib.md5()
hasher.update(bytearray(as_json, "utf8"))
prefix = hasher.hexdigest()[:8]
logging.info(f"Prefix is {prefix}.")
with open(f"{arguments.data}/{prefix}-configuration.json", "w") as output:
output.write(as_json)
# Load data:
data = Data({
"characteristics": arguments.characteristics,
"results": arguments.results,
"renaming": RENAMING,
"exclude": arguments.exclude,
"year": arguments.year,
})
options = {
"Choice": True,
"Duplicates": arguments.duplicates,
"Output Trees": arguments.output_trees,
"Directory": arguments.data,
"Prefix": prefix,
"Forget": arguments.forget,
"Training": arguments.training,
"Score": arguments.score,
}
# Read data:
data.characteristics()
# Compute the characteristics for models:
characteristics_of(data, options)
# Use results:
data.results()
examinations = {x["Examination"] for x in data.results()}
tools = {x["Tool"] for x in data.results()}
# Compute maximum score:
maxs = max_score(data, options)
total_score = 0
for _, subscore in maxs.items():
total_score += subscore
logging.info(f"Maximum score is {total_score}:")
for examination in examinations:
score = maxs[examination]
logging.info(f"* {examination}: {score}")
# Extract known data:
known_data = known(data)
with open(f"{arguments.data}/{prefix}-known.json", "w") as output:
json.dump(known_data, output)
# Extract learned data:
learned_data, values = learned(data, options)
with open(f"{arguments.data}/{prefix}-values.json", "w") as output:
json.dump(values.items, output)
# Compute scores for tools:
for tool in sorted(tools):
logging.info(f"Computing score of tool: {tool}.")
score = score_of(data, tool, options)
subresult = {
"Algorithm": tool,
"Is-Tool": True,
"Is-Algorithm": False,
}
total = 0
for key, value in score.items():
subresult[key] = value
total = total + value
learned_data.append(subresult)
ratio = math.ceil(100*total/total_score)
logging.info(f" Score: {total} / {total_score} ({ratio}%)")
with open(f"{arguments.data}/{prefix}-learned.json", "w") as output:
json.dump(learned_data, output)
# Print per-examination scores:
srt = []
for subresult in learned_data:
for examination in examinations:
srt.append({
"Name": subresult["Algorithm"],
"Examination": examination,
"Score": subresult[examination],
})
srt = sorted(srt, key=lambda e: (
e["Examination"], e["Score"], e["Name"]
), reverse=True)
for examination in sorted(examinations):
subscore = maxs[examination]
logging.info(f"In {examination}, maximum score {subscore}:")
for element in [x for x in srt if x["Examination"] == examination]:
score = element["Score"]
name = element["Name"]
if score > 0:
ratio = math.ceil(100*score/subscore)
logging.info(f"* {score} / {subscore} ({ratio}%) "
f"for {name}.")
def do_run(arguments):
"""
Main function for the run command.
"""
logging.info(f"Prefix is {arguments.prefix}.")
# Load known info:
logging.info(
f"Reading known information "
f"in {arguments.data}/{arguments.prefix}-known.json."
)
with open(f"{arguments.data}/{arguments.prefix}-known.json", "r") as i:
known_data = json.load(i)
# Load learned info:
logging.info(
f"Reading learned information "
f"in {arguments.data}/{arguments.prefix}-learned.json."
)
with open(f"{arguments.data}/{arguments.prefix}-learned.json", "r") as i:
learned_data = json.load(i)
# Load translations:
logging.info(
f"Reading value translations "
f"in {arguments.data}/{arguments.prefix}-values.json."
)
with open(f"{arguments.data}/{arguments.prefix}-values.json", "r") as i:
translations = json.load(i)
values = Values(translations)
# Find input:
directory = unarchive(arguments.input)
if directory is None:
sys.exit(1)
if arguments.instance is None:
instance = pathlib.PurePath(directory).stem
else:
instance = arguments.instance
split = re.search(r"([^-]+)\-([^-]+)\-([^-]+)$", instance)
model = split.group(1)
logging.info(f"Using {instance} as instance name.")
logging.info(f"Using {model} as model name.")
# Set tool:
known_tools = None
if arguments.tool is not None:
known_tools = [{
"Tool": arguments.tool,
"Time": None,
"Memory": None,
}]
else:
# Find known tools:
        if arguments.examination in known_data:
            if instance in known_data[arguments.examination]:
                known_tools = known_data[arguments.examination][instance]
            elif model in known_data[arguments.examination]:
                known_tools = known_data[arguments.examination][model]
if known_tools is None:
logging.warning(
f"Cannot find known information "
f"for examination {arguments.examination} "
f"on instance {instance} or model {model}.")
# Set algorithm:
learned_tools = None
if arguments.algorithm:
algorithm = sorted(
[x for x in learned_data
if x["Algorithm"] == arguments.algorithm],
key=lambda e: e[arguments.examination],
reverse=True,
)[0]["Algorithm"]
filename = f"{arguments.data}/{arguments.prefix}-learned.{algorithm}.p"
with open(filename, "rb") as i:
model = pickle.load(i)
else:
algorithm = sorted(
[x for x in learned_data if x["Is-Algorithm"]],
key=lambda e: e[arguments.examination],
reverse=True,
)[0]["Algorithm"]
filename = f"{arguments.data}/{arguments.prefix}-learned.{algorithm}.p"
with open(filename, "rb") as i:
model = pickle.load(i)
logging.info(f"Using algorithm or tool {algorithm}.")
# Find learned tools:
is_colored = read_boolean(f"{directory}/iscolored")
if is_colored:
has_pt = read_boolean(f"{directory}/equiv_pt")
else:
has_colored = read_boolean(f"{directory}/equiv_col")
with open(f"{directory}/GenericPropertiesVerdict.xml", "r") as i:
verdict = xmltodict.parse(i.read())
characteristics = {
"Examination": arguments.examination,
"Place/Transition": (not is_colored) or has_pt,
"Colored": is_colored or has_colored,
"Relative-Time": 1, # FIXME
"Relative-Memory": 1, # FIXME
}
for value in verdict["toolspecific"]["verdict"]:
if value["@value"] == "true":
characteristics[VERDICTS[value["@reference"]]] = True
elif value["@value"] == "false":
characteristics[VERDICTS[value["@reference"]]] = False
else:
characteristics[VERDICTS[value["@reference"]]] = None
logging.info(f"Model characteristics are: {characteristics}.")
# Load characteristics for machine learning:
test = {}
for key, value in characteristics.items():
test[key] = values.to_learning(value)
# http://scikit-learn.org/stable/modules/model_persistence.html
predicted = model.predict( | pandas.DataFrame([test]) | pandas.DataFrame |
import logging
import pandas as pd
from scipy.cluster.vq import kmeans2
from django.http import JsonResponse
from django.views import View
from . import forms
from . import models
logger = logging.getLogger(__name__)
class LocationListAPI(View):
def get(self, request, *args, **kwargs):
"""Summarize IPv6 density per location using the current API data.
Limit locations to a geographical region by passing in "north",
"south", "east", and "west" boundaries. Reduce the size of the data
by specifying a number of clusters.
"""
# Find the current version of API data.
self.version = models.Version.objects.get_current_version()
if not self.version:
error_msg = "Current version has not been configured."
logger.error(error_msg)
return JsonResponse({'error': error_msg}, status=404)
# Allow filtering locations by geographical bounds.
form = forms.LocationFilter(data=self.request.GET)
if not form.is_valid():
return JsonResponse({
'error': "Invalid boundary parameters.",
'form_errors': form.errors,
}, status=400)
# Find the location objects we're interested in.
locations = self.version.location_set.filter(**form.get_filters())
locations = locations.values_list('latitude', 'longitude', 'density')
locations = [(float(lat), float(lng), int(n)) for lat, lng, n in locations]
num_clusters = form.cleaned_data.get('clusters')
if num_clusters:
# Group lat/lng values into geographical groups.
df = pd.DataFrame(locations, columns=['lat', 'lng', 'density'])
coordinates, indices = kmeans2(df[['lat', 'lng']], num_clusters)
# Sum the density per group.
result = [pd.DataFrame(indices, columns=['coord']), df[['density']]]
result = | pd.concat(result, axis=1) | pandas.concat |
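# Illustrative sketch (not from the original view): the kmeans2 clustering plus
# per-cluster density sum performed above, on a few made-up coordinates. Column
# names mirror the view's DataFrame ("lat", "lng", "density").
import pandas as pd
from scipy.cluster.vq import kmeans2

_df = pd.DataFrame(
    [(40.7, -74.0, 10), (40.8, -73.9, 5), (34.0, -118.2, 7), (34.1, -118.3, 3)],
    columns=["lat", "lng", "density"],
)
_centroids, _labels = kmeans2(_df[["lat", "lng"]].to_numpy(), 2)
print(_centroids)                               # one (lat, lng) centroid per cluster
print(_df.groupby(_labels)["density"].sum())    # summed density per cluster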
from IPython.display import display
import pandas as pd
import pyomo.environ as pe
import numpy as np
import csv
import os
import shutil
class inosys:
def __init__(self, inp_folder, ref_bus, dshed_cost = 1000000, rshed_cost = 500, phase = 3, vmin=0.85, vmax=1.15, sbase = 1, sc_fa = 1):
'''
Initialise the investment and operation problem.
:param str inp_folder: The input directory for the data. It expects to find several CSV files detailing the system input data (Default current folder)
:param float dshed_cost: Demand Shedding Price (Default 1000000)
:param float rshed_cost: Renewable Shedding Price (Default 500)
:param int phase: Number of Phases (Default 3)
:param float vmin: Minimum node voltage (Default 0.85)
:param float vmax: Maximum node voltage (Default 1.15)
:param float sbase: Base Apparent Power (Default 1 kW)
:param int ref_bus: Reference node
:param float sc_fa: Scaling Factor (Default 1)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.inosys("wat_inv", ref_bus = 260)
'''
self.cgen = pd.read_csv(inp_folder + os.sep + 'cgen_dist.csv')
self.egen = pd.read_csv(inp_folder + os.sep + 'egen_dist.csv')
self.csol = pd.read_csv(inp_folder + os.sep + 'csol_dist.csv')
self.esol = pd.read_csv(inp_folder + os.sep + 'esol_dist.csv')
self.cwin = pd.read_csv(inp_folder + os.sep + 'cwin_dist.csv')
self.ewin = pd.read_csv(inp_folder + os.sep + 'ewin_dist.csv')
self.cbat = pd.read_csv(inp_folder + os.sep + 'cbat_dist.csv')
self.elin = pd.read_csv(inp_folder + os.sep + 'elin_dist.csv')
self.pdem = pd.read_csv(inp_folder + os.sep + 'pdem_dist.csv')
self.qdem = pd.read_csv(inp_folder + os.sep + 'qdem_dist.csv')
self.prep = pd.read_csv(inp_folder + os.sep + 'prep_dist.csv')
self.qrep = | pd.read_csv(inp_folder + os.sep + 'qrep_dist.csv') | pandas.read_csv |
from unittest.mock import ANY, MagicMock, patch
import pytest
import pandas as pd
from pandas._testing import assert_frame_equal
from muttlib.dbconn.base import BaseClient, EngineBaseClient
@pytest.fixture
def engine_baseClient():
client = EngineBaseClient(
database="database",
host="host",
password="password",
port=5544,
username="username",
dialect="mysql",
)
return client
def test_base_insert_from_frame_connection_not_none(engine_baseClient):
df = pd.DataFrame({'col1': ['1'], 'col2': ['3.0']})
with patch("muttlib.dbconn.base.create_engine") as create_engine, patch.object(
df, 'to_sql'
) as mock_to_sql:
table = "test_table"
engine = engine_baseClient._connect()
engine_baseClient.insert_from_frame(df, table, connection=engine)
create_engine.assert_called_once_with(
engine_baseClient.conn_str, connect_args=ANY, echo=ANY
)
mock_to_sql.assert_called_once_with(
table, engine, if_exists='append', index=False,
)
def test_base_execute_connection_and_params_not_none(engine_baseClient):
with patch("muttlib.dbconn.base.create_engine") as create_engine:
engine = engine_baseClient._connect()
q = "SELECT * FROM {table} WHERE {condition1}"
params = {"table": "test", "condition1": "id = 1"}
engine_baseClient.execute(q, params, connection=engine)
engine.execute.assert_called_once_with(q.format(**params))
def test_base_to_frame_data_none(engine_baseClient):
with patch("muttlib.dbconn.base.create_engine") as create_engine:
q = "SELECT *"
create_engine.return_value.connect.return_value.__enter__.return_value.execute.return_value.fetchall.return_value = (
None
)
df = engine_baseClient.to_frame(q)
assert_frame_equal(df, pd.DataFrame())
def test_base_setattr_getattr(engine_baseClient):
engine_baseClient.__setattr__('driver', "pymysql")
assert engine_baseClient.__getattr__('driver') == "pymysql"
engine_baseClient.__setattr__('dialect', "mysql")
assert engine_baseClient.__getattr__('dialect') == "mysql"
@patch("muttlib.dbconn.base.BaseClient.__abstractmethods__", set())
def test_base_to_frame_rise_not_implemented_method():
with patch("muttlib.dbconn.base.create_engine") as create_engine:
base_cli = BaseClient(dialect="mysql",)
q = "SELECT *"
with pytest.raises(NotImplementedError):
base_cli.to_frame(q)
@patch("muttlib.dbconn.base.BaseClient.__abstractmethods__", set())
def test_base_insert_from_frame_if_exists_replace_fail_rise_not_implemented_method():
with patch("muttlib.dbconn.base.create_engine") as create_engine:
base_cli = BaseClient(dialect="mysql",)
df = pd.DataFrame({'col1': ['1'], 'col2': ['3.0']})
table = "test_table"
with pytest.raises(NotImplementedError):
base_cli.insert_from_frame(df, table, if_exists='replace')
with pytest.raises(NotImplementedError):
base_cli.insert_from_frame(df, table, if_exists='fail')
@patch("muttlib.dbconn.base.BaseClient.__abstractmethods__", set())
def test_base_insert_from_frame_index_true_rise_not_implemented_method():
with patch("muttlib.dbconn.base.create_engine") as create_engine:
base_cli = BaseClient(dialect="mysql",)
df = pd.DataFrame({'col1': ['1'], 'col2': ['3.0']})
table = "test_table"
with pytest.raises(NotImplementedError):
base_cli.insert_from_frame(df, table, index=True)
@patch("muttlib.dbconn.base.BaseClient.__abstractmethods__", set())
def test_base_insert_from_frame_chunk():
with patch("muttlib.dbconn.base.create_engine") as create_engine:
base_cli = BaseClient(dialect="mysql",)
base_cli._connect = create_engine.get_engine()
df = | pd.DataFrame({'col1': ['1'], 'col2': ['3.0']}) | pandas.DataFrame |
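# Illustrative sketch (not from the original test module): the patching pattern
# these tests rely on, shown in isolation -- stub a DataFrame's to_sql on the
# instance and assert how it was called. The table name and None engine are
# placeholders for this example only.
from unittest.mock import patch
import pandas as pd

_df = pd.DataFrame({'col1': ['1'], 'col2': ['3.0']})
with patch.object(_df, 'to_sql') as _mock_to_sql:
    _df.to_sql('test_table', None, if_exists='append', index=False)
    _mock_to_sql.assert_called_once_with('test_table', None, if_exists='append', index=False)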
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from prophet import Prophet
from sklearn import metrics
def get_prophet_data(stock_path):
with open(stock_path, 'r', encoding='utf-8') as f:
df = pd.read_json(f.read(), orient='records')
print(df)
# rename
df.rename(columns={'Date': 'ds', 'Close': 'y'}, inplace=True)
print(df)
return df
def predict_single_var_future(df_data, header_name, forecast_periods):
df_data.rename(columns={header_name: 'y'}, inplace=True)
df_log = df_data.copy()
df_log['y'] = np.log(df_log['y'])
m = Prophet()
m.fit(df_log)
future = m.make_future_dataframe(periods=forecast_periods)
forecast = m.predict(future)
print(forecast.head())
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
# fig1 = m.plot(forecast)
# fig2 = m.plot_components(forecast)
df_close = pd.DataFrame(df_data[['ds', 'y']]).set_index('ds')
print(df_close)
forecast_with_org_data = forecast.set_index('ds').join(df_close)
print(forecast_with_org_data)
forecast_with_org_data = forecast_with_org_data[['y', 'yhat', 'yhat_upper', 'yhat_lower']]
forecast_with_org_data['yhat'] = np.exp(forecast_with_org_data.yhat)
forecast_with_org_data['yhat_upper'] = np.exp(forecast_with_org_data.yhat_upper)
forecast_with_org_data['yhat_lower'] = np.exp(forecast_with_org_data.yhat_lower)
forecast_with_org_data[['y', 'yhat', 'yhat_upper', 'yhat_lower']].plot(figsize=(8, 6))
print(forecast_with_org_data)
# plt.show()
forecast_with_org_data.rename(columns={'yhat': header_name}, inplace=True)
return forecast_with_org_data[header_name][-1*forecast_periods:]
def main():
| pd.set_option('display.max_columns', None) | pandas.set_option |
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestToFrame:
def test_to_frame(self, datetime_series):
datetime_series.name = None
rs = datetime_series.to_frame()
xp = DataFrame(datetime_series.values, index=datetime_series.index)
tm.assert_frame_equal(rs, xp)
datetime_series.name = "testname"
rs = datetime_series.to_frame()
xp = DataFrame(
{"testname": datetime_series.values}, index=datetime_series.index
)
tm.assert_frame_equal(rs, xp)
rs = datetime_series.to_frame(name="testdifferent")
xp = DataFrame(
{"testdifferent": datetime_series.values}, index=datetime_series.index
)
tm.assert_frame_equal(rs, xp)
def test_to_frame_expanddim(self):
# GH#9762
class SubclassedSeries(Series):
@property
def _constructor_expanddim(self):
return SubclassedFrame
class SubclassedFrame(DataFrame):
pass
ser = SubclassedSeries([1, 2, 3], name="X")
result = ser.to_frame()
assert isinstance(result, SubclassedFrame)
expected = SubclassedFrame({"X": [1, 2, 3]})
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
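# Illustrative sketch (not from the original test module): the Series.to_frame
# behaviour exercised above, in plain pandas.
import pandas as pd

_ser = pd.Series([1, 2, 3], name="X")
print(_ser.to_frame().columns.tolist())                 # ['X']
print(_ser.to_frame(name="renamed").columns.tolist())   # ['renamed']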
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from gensim.models import LdaModel
from gensim.matutils import Sparse2Corpus
from scipy import sparse
from itertools import product
from random import shuffle
from time import time
import spacy
import logging
pd.set_option('display.expand_frame_repr', False)
np.random.seed(42)
nlp = spacy.load('en')
logging.basicConfig(
filename='gensim.log',
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%H:%M:%S')
def format_time(t):
m_, s = divmod(t, 60)
h, m = divmod(m_, 60)
return f'{h:>02.0f}:{m:>02.0f}:{s:>02.0f}'
clean_text = Path('clean_text.txt')
# experiment setup
cols = ['vocab_size', 'test_vocab', 'min_df', 'max_df', 'binary', 'num_topics', 'passes', 'perplexity']
experiment_path = Path('experiments')
# get text files
clean_docs = clean_text.read_text().split('\n')
print('\n', len(clean_docs))
train_docs, test_docs = train_test_split(clean_docs, test_size=.1)
# dtm params
min_dfs = [50, 100, 250, 500]
max_dfs = [.1, .25, .5, 1.0]
binarys = [True, False]
dtm_params = list(product(*[min_dfs, max_dfs, binarys]))
n = len(dtm_params)
shuffle(dtm_params)
topicss = [3, 5, 7, 10, 15, 20, 25, 50]
passess = [1, 25]
model_params = list(product(*[topicss, passess]))
corpus = id2word = train_corpus = train_tokens = test_corpus = vocab_size = test_vocab = None
start = time()
for i, (min_df, max_df, binary) in enumerate(dtm_params, 1):
print(min_df, max_df, binary)
result = []
vocab_path = experiment_path / str(min_df) / str(max_df) / str(int(binary))
if vocab_path.exists():
continue
else:
vocab_path.mkdir(exist_ok=True, parents=True)
vectorizer = CountVectorizer(min_df=min_df,
max_df=max_df,
binary=binary)
train_dtm = vectorizer.fit_transform(train_docs)
train_corpus = Sparse2Corpus(train_dtm, documents_columns=False)
train_tokens = vectorizer.get_feature_names()
test_dtm = vectorizer.transform(test_docs)
test_corpus = Sparse2Corpus(test_dtm, documents_columns=False)
test_vocab = test_dtm.count_nonzero()
dtm = vectorizer.fit_transform(clean_docs)
sparse.save_npz(vocab_path / f'dtm.npz', dtm)
tokens = vectorizer.get_feature_names()
vocab_size = len(tokens)
pd.Series(tokens).to_csv(vocab_path / f'tokens.csv', index=False)
id2word = pd.Series(tokens).to_dict()
corpus = Sparse2Corpus(dtm, documents_columns=False)
coherence = pd.DataFrame()
for num_topics, passes in model_params:
model_path = vocab_path / str(num_topics) / str(passes)
if not model_path.exists():
model_path.mkdir(exist_ok=True, parents=True)
print((num_topics, passes), end=' ', flush=True)
lda = LdaModel(corpus=corpus,
num_topics=num_topics,
id2word=id2word,
passes=passes,
eval_every=None,
random_state=42)
doc_topics = | pd.DataFrame() | pandas.DataFrame |
from typing import Union, Optional
import pytest
import scanpy as sc
import cellrank.external as cre
from anndata import AnnData
from cellrank.tl.kernels import ConnectivityKernel
from cellrank.external.kernels._utils import MarkerGenes
from cellrank.external.kernels._wot_kernel import LastTimePoint
import numpy as np
import pandas as pd
from scipy.sparse import spmatrix, csr_matrix
from pandas.core.dtypes.common import is_categorical_dtype
from matplotlib.cm import get_cmap
from matplotlib.colors import to_hex
class TestOTKernel:
def test_no_connectivities(self, adata_large: AnnData):
del adata_large.obsp["connectivities"]
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
assert ok._conn is None
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
def test_method_not_implemented(self, adata_large: AnnData):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
with pytest.raises(
NotImplementedError, match="Method `'unbal'` is not yet implemented."
):
ok.compute_transition_matrix(1, 0.001, method="unbal")
def test_no_terminal_states(self, adata_large: AnnData):
with pytest.raises(RuntimeError, match="Unable to initialize the kernel."):
cre.kernels.StationaryOTKernel(
adata_large,
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
def test_normal_run(self, adata_large: AnnData):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states= | pd.Series(terminal_states) | pandas.Series |
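# Illustrative sketch (not from the original tests): how the terminal-state
# annotation above is built -- an object array filled with NaN for non-terminal
# cells, a label for terminal ones, then cast to a categorical Series.
import numpy as np
import pandas as pd

_states = np.full((5,), fill_value=np.nan, dtype=object)
_states[[1, 3]] = "GI"
_ser = pd.Series(_states).astype("category")
print(_ser.cat.categories)  # Index(['GI'], dtype='object')
print(_ser)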
#!/usr/bin/env python
"""Tests for `pubchem_api` package."""
import os
import numpy as np
import pandas as pd
import scipy
from scipy.spatial import distance
import unittest
# from click.testing import CliRunner
# from structure_prediction import cli
class TestDataPreprocessing(unittest.TestCase):
"""Tests for `data pre-processing` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_001_adjacency_matrix_ok(self):
"""
        Tests to ensure that the adjacency matrix is prepared correctly:
        shows that the distance.pdist function calculates correctly on a simple test array.
"""
print("Test One... To show that distance.pdist function calculates correctly on a simple test array")
test_array_1 = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
a = np.sqrt(((1-1)**2) + ((2-2)**2) + ((3-3)**2))
b = np.sqrt(((1-4)**2) + ((2-5)**2) + ((3-6)**2))
c = np.sqrt(((1-7)**2) + ((2-8)**2) + ((3-9)**2))
d = np.sqrt(((1-10)**2) + ((2-11)**2) + ((3-12)**2))
e = np.sqrt(((4-1)**2) + ((5-2)**2) + ((6-3)**2))
f = np.sqrt(((4-4)**2) + ((5-5)**2) + ((6-6)**2))
g = np.sqrt(((4-7)**2) + ((5-8)**2) + ((6-9)**2))
h = np.sqrt(((4-10)**2) + ((5-11)**2) + ((6-12)**2))
i = np.sqrt(((7-1)**2) + ((8-2)**2) + ((9-3)**2))
j = np.sqrt(((7-4)**2) + ((8-5)**2) + ((9-6)**2))
k = np.sqrt(((7-7)**2) + ((8-8)**2) + ((9-9)**2))
l = np.sqrt(((7-10)**2) + ((8-11)**2) + ((9-12)**2))
m = np.sqrt(((10-1)**2) + ((11-2)**2) + ((12-3)**2))
n = np.sqrt(((10-4)**2) + ((11-5)**2) + ((12-6)**2))
o = np.sqrt(((10-7)**2) + ((11-8)**2) + ((12-9)**2))
p = np.sqrt(((10-10)**2) + ((11-11)**2) + ((12-12)**2))
result_array = np.array([[a, b, c, d],
[e, f, g, h],
[i, j, k, l],
[m, n, o, p]])
print(result_array)
calculate_distances = distance.pdist(test_array_1, 'euclidean')
make_square = distance.squareform(calculate_distances)
print(make_square)
assert np.array_equal(result_array, make_square)
def test_002_adjacency_matrix_ok(self):
"""
        Tests to ensure that the adjacency matrix is prepared correctly:
        shows that the distance.pdist function calculates correctly on a simple test array.
"""
print(("Test Two... To show that distance.pdist function calculates correctly on a simple test array"))
test_array_1 = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
a = np.sqrt(((1-1)**2) + ((2-2)**2) + ((3-3)**2))
b = np.sqrt(((1-4)**2) + ((2-5)**2) + ((3-6)**2))
c = np.sqrt(((1-7)**2) + ((2-8)**2) + ((3-9)**2))
d = np.sqrt(((1-10)**2) + ((2-11)**2) + ((3-12)**2))
e = np.sqrt(((4-1)**2) + ((5-2)**2) + ((6-3)**2))
f = np.sqrt(((4-4)**2) + ((5-5)**2) + ((6-6)**2))
g = np.sqrt(((4-7)**2) + ((5-8)**2) + ((6-9)**2))
h = np.sqrt(((4-10)**2) + ((5-11)**2) + ((6-12)**2))
i = np.sqrt(((7-1)**2) + ((8-2)**2) + ((9-3)**2))
j = np.sqrt(((7-4)**2) + ((8-5)**2) + ((9-6)**2))
k = np.sqrt(((7-7)**2) + ((8-8)**2) + ((9-9)**2))
l = np.sqrt(((7-10)**2) + ((8-11)**2) + ((9-12)**2))
m = np.sqrt(((10-1)**2) + ((11-2)**2) + ((12-3)**2))
n = np.sqrt(((10-4)**2) + ((11-5)**2) + ((12-6)**2))
o = np.sqrt(((10-7)**2) + ((11-8)**2) + ((12-9)**2))
p = np.sqrt(((10-10)**2) + ((11-11)**2) + ((12-12)**2))
result_array = np.array([[a, b, c, d],
[e, f, g, h],
[i, j, k, l],
[m, n, o, p]])
calculate_distances = distance.pdist(test_array_1, 'euclidean')
make_square = distance.squareform(calculate_distances)
for i in range(0,result_array.shape[1]):
# print(result_array[i,i])
self.assertEqual(result_array[i,i], 0)
for i in range(0,make_square.shape[1]):
# print(make_square[i,i])
self.assertEqual(make_square[i,i], 0)
def test_003_adjacency_matrix_ok(self):
"""
        Tests to ensure that the adjacency matrix is prepared correctly:
        shows that the distance.pdist function calculates correctly on a pdb.cif file.
"""
print("Test Three... To show that distance.pdist function calculates correctly on a pdb.cif file")
with open('./extracted_test_data/1j5a.cif') as infile:
target_list = infile.read().split('\n')
df_1 = pd.DataFrame(data=target_list, columns=["header"]) # Put list in a dataframe m X 1 column
df_1 = df_1[:-1] # Removes additional row that is included
cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns
critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions
print(critical_info_to_df_3.head())
convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info
calculate_distances = distance.pdist(convert_to_array, 'euclidean')
make_square = distance.squareform(calculate_distances)
print(make_square)
assert df_1.shape[0] == cif_to_df_2.shape[0]
assert cif_to_df_2.shape[0] == critical_info_to_df_3.shape[0]
def test_004_adjacency_matrix_ok(self):
"""
        Tests to ensure that the adjacency matrix is prepared correctly:
        shows that the distance.pdist function calculates correctly on a pdb.cif file.
"""
print("Test Four... To show that distance.pdist function calculates correctly on a pdb.cif file")
with open('./extracted_test_data/1j5a.cif') as infile:
target_list = infile.read().split('\n')
df_1 = pd.DataFrame(data=target_list, columns=["header"]) # Put list in a dataframe m X 1 column
df_1 = df_1[:-1] # Removes additional row that is included
cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns
critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions
convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info
calculate_distances = distance.pdist(convert_to_array, 'euclidean')
make_square = distance.squareform(calculate_distances)
for i in range(0,make_square.shape[1]):
print(make_square[i,i])
self.assertEqual(make_square[i,i], 0)
def test_005_adjacency_matrix_ok(self):
"""
        Tests to ensure that the adjacency matrix is prepared correctly:
        shows that the adjacency matrix maintains its form when converted back into a dataframe.
"""
print("Test Five...")
with open('./extracted_test_data/1j5a.cif') as infile:
target_list = infile.read().split('\n')
df_1 = pd.DataFrame(data=target_list, columns=["header"]) # Put list in a dataframe m X 1 column
df_1 = df_1[:-1] # Removes additional row that is included
cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns
critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions
convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info
calculate_distances = distance.pdist(convert_to_array, 'euclidean')
make_square = distance.squareform(calculate_distances)
adjacency_matrix_df_4 = | pd.DataFrame(make_square) | pandas.DataFrame |
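# Illustrative sketch (not from the original tests): the pdist -> squareform ->
# DataFrame round trip the tests above exercise, on a three-point toy array.
import numpy as np
import pandas as pd
from scipy.spatial import distance

_pts = np.array([[0.0, 0.0, 0.0], [3.0, 4.0, 0.0], [0.0, 0.0, 5.0]])
_square = distance.squareform(distance.pdist(_pts, 'euclidean'))
_df = pd.DataFrame(_square)
assert (_df.values.diagonal() == 0).all()   # zero self-distances on the diagonal
print(_df)                                  # symmetric 3x3 distance matrix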
import numpy as np
import pandas as pd
import os
import pickle
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_curve
import sklearn.metrics as metrics
from model import lightgbm_train
from glob import glob
from utils import *
import shap
from collections import defaultdict
def load_data(all_fpath, n, f):
"""
params
    all_fpath: a list of paths to all sample files
n: int
last n records used for prediction
f: list
extra features
yields
all_matrix: Numpy array
all feature matrix of all files in all_fpath
fnames: list
        feature names
"""
all_matrix =[]
for fpath in all_fpath:
assert os.path.exists(fpath), "File '"+fpath+"' not exist!"
d=pd.read_csv(fpath, sep = '|', header = 0)
m, fnames = construct_feature_matrix(d, n, f)
all_matrix.append(m)
all_matrix=np.concatenate(all_matrix, axis = 0)
return all_matrix, fnames
def evaluation(gs,pred):
"""
params
    gs: gold-standard binary labels
    pred: predicted probabilities/scores
    yields
    the_auc: area under the ROC curve (AUROC)
    the_auprc: area under the precision-recall curve (AUPRC)
"""
#auc
fpr, tpr, thresholds = metrics.roc_curve(gs, pred, pos_label=1)
the_auc=metrics.auc(fpr, tpr)
    # auprc
precision, recall, thresholds = precision_recall_curve(gs, pred)
the_auprc = metrics.auc(recall, precision)
return the_auc, the_auprc
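# Illustrative sketch (not from the original module): what evaluation() computes,
# on a tiny hand-made example -- AUROC from the ROC curve and AUPRC from the
# precision-recall curve.
import sklearn.metrics as metrics
from sklearn.metrics import precision_recall_curve

_gs = [0, 0, 1, 1]
_pred = [0.1, 0.4, 0.35, 0.8]
_fpr, _tpr, _ = metrics.roc_curve(_gs, _pred, pos_label=1)
print("AUROC:", metrics.auc(_fpr, _tpr))
_prec, _rec, _ = precision_recall_curve(_gs, _pred)
print("AUPRC:", metrics.auc(_rec, _prec))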
def five_fold_cv(gs_filepath, n, f, shap):
"""
params
    gs_filepath: CSV whose first column lists sample file paths and whose second column holds gold-standard labels
    n: last n records used for prediction
    f: extra features
    shap: if True, run SHAP analysis on each fold
yields
"""
gs_file = pd.read_csv(gs_filepath, header = None)
f_path = gs_file[0].to_list()
f_gs = gs_file[1].to_list()
kf = KFold(n_splits=5, random_state= 0, shuffle= True)
out_eva = open('eva.tsv', 'w')
out_eva.write("%s\t%s\t%s\n" %('fold', 'AUROC', 'AUPRC'))
if shap:
all_feature_shap = []
all_t_shap = []
for i,(train_idx, test_idx) in enumerate(kf.split(f_path)):
print("Start fold "+str(i)+" in five-fold cross-validation ..")
# load train
print("Load training data ...")
train_f = [f_path[j] for j in train_idx]
train_matrix, _ = load_data(train_f, n, f)
train_gs = [f_gs[j] for j in train_idx]
# train model
print("Start training ...")
gbm = lightgbm_train(train_matrix, train_gs)
os.makedirs('./models', exist_ok = True)
filename = './models/finalized_model.sav.'+str(i)
print('Saving model to '+ filename+ '...') # save model to file
pickle.dump(gbm, open(filename, 'wb'))
# load test
print("Load test data ...")
test_f = [f_path[j] for j in test_idx]
test_matrix,f_names = load_data(test_f, n, f)
test_gs = [f_gs[j] for j in test_idx]
# test model
print("Start evaludation ...")
test_pred = gbm.predict(test_matrix)
# evaluation
the_auc, the_auprc = evaluation(test_gs, test_pred)
print("AUC:%.4f; AUPRC: %.4f" % (the_auc, the_auprc))
out_eva.write("%d\t%.4f\t%.4f\n" %(i, the_auc, the_auprc))
# SHAP
if shap:
print("Start SHAP analysis ...")
feature_shap, t_shap = shap_analysis(gbm, test_matrix, f_names)
feature_shap['fold'] = i
t_shap['fold'] = i
all_feature_shap.append(feature_shap)
all_t_shap.append(t_shap)
out_eva.close()
if shap:
all_feature_shap = pd.concat(all_feature_shap)
all_t_shap = pd.concat(all_t_shap)
all_feature_shap.to_csv('shap_group_by_measurment.csv', index = False)
all_t_shap.to_csv('shap_group_by_timeslot.csv', index = False)
def specific_evaluation(gs_filepath, n, f):
""" Conduct specific evaludation and shap analysis at the specified dataset
Params
gs_filepath
n
f
Yields
"""
gs_file = pd.read_csv(gs_filepath, header = None)
f_path = gs_file[0].to_list()
f_gs = gs_file[1].to_list()
test_idx = [55,73,75,78,86,92,93,95]# range(len(f_path))
print(test_idx)
model_paths = glob('./models/finalized_model.sav.*')
for p in model_paths:
gbm = pickle.load(open(p, 'rb'))
# load test
test_f = [f_path[j] for j in test_idx]
test_matrix, f_names = load_data(test_f, n, f)
test_gs = [f_gs[j] for j in test_idx]
test_pred =gbm.predict(test_matrix)
# evaluation
the_auc, the_auprc = evaluation(test_gs, test_pred)
print(the_auc, the_auprc)
# SHAP analysis
feature_shap, t_shap = shap_analysis(gbm, test_matrix, f_names)
feature_shap.to_csv('shap_group_by_measurment.csv', index= False)
t_shap.to_csv('shap_group_by_timeslot.csv', index = False)
def shap_analysis(regressor, Test_X, f_names):
""" SHAP analysis on a sspecific dataset
Params
regressor
df
Yields
"""
shap_values = shap.TreeExplainer(regressor).shap_values(Test_X)
all_f_dict = defaultdict(lambda:[])
for i, n in enumerate(f_names):
all_f_dict[n.split('|')[0]].append(i) # get all unique features
#print(all_f_dict)
time_p_dict = defaultdict(lambda:[])
for i, n in enumerate(f_names):
if n.endswith('ori') or n.endswith('norm'):
            time_p_dict[n.split('|')[2]].append(i) # get all unique timepoints
#print(time_p_dict)
# combine subfeatures for each time slots
feature_shap = {"feature":[], "mean|SHAP val|":[]}
for k, v in all_f_dict.items():
feature_shap["feature"].append(k)
feature_shap["mean|SHAP val|"].append(abs(shap_values[:, v].sum(axis = 1)).mean())
feature_shap = pd.DataFrame.from_dict(feature_shap)
#.sort_values(by="mean|SHAP val|", ascending = False).set_index('feature')
# combine subfeatures for each feature types
t_shap = {"the_last_nth_timepoint":[], "mean|SHAP val|":[]}
for k, v in time_p_dict.items():
t_shap["the_last_nth_timepoint"].append(k)
t_shap["mean|SHAP val|"].append(abs(shap_values[:, v].sum(axis = 1)).mean())
t_shap = | pd.DataFrame.from_dict(t_shap) | pandas.DataFrame.from_dict |
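# Illustrative sketch (not from the original module): the grouped |SHAP|
# aggregation used above, on a made-up shap_values matrix -- sum the columns
# belonging to one measurement, then average the absolute per-sample sums.
import numpy as np

_shap_values = np.array([[0.2, -0.1, 0.3],
                         [-0.4, 0.2, 0.1]])
_cols_for_feature = [0, 2]        # columns that belong to the same measurement
_mean_abs = np.abs(_shap_values[:, _cols_for_feature].sum(axis=1)).mean()
print(_mean_abs)                  # 0.4  ( (|0.5| + |-0.3|) / 2 )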
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from odps.tests.core import TestBase
from odps.config import option_context
from odps.compat import unittest
from odps.models import Schema
from odps.df.expr.expressions import *
from odps.df.expr.core import ExprDictionary
from odps.df.expr import errors
from odps.df.expr.tests.core import MockTable
from odps.df.expr.arithmetic import Add
class Test(TestBase):
def setup(self):
schema = Schema.from_lists(['name', 'id', 'fid'], [types.string, types.int64, types.float64])
table = MockTable(name='pyodps_test_expr_table', schema=schema)
table._client = self.config.odps.rest
self.expr = CollectionExpr(_source_data=table, _schema=schema)
schema2 = Schema.from_lists(['name', 'id', 'fid'], [types.string, types.int64, types.float64],
['part1', 'part2'], [types.string, types.int64])
table2 = MockTable(name='pyodps_test_expr_table2', schema=schema2)
table2._client = self.config.odps.rest
self.expr2 = CollectionExpr(_source_data=table2, _schema=schema2)
def testDir(self):
expr_dir = dir(self.expr)
self.assertIn('id', expr_dir)
self.assertIn('fid', expr_dir)
new_df = self.expr[self.expr.id, self.expr.fid, self.expr.name.rename('if')]
self.assertNotIn('if', dir(new_df))
self.assertEqual(self.expr._id, self.expr.copy()._id)
def testProjection(self):
projected = self.expr['name', self.expr.id.rename('new_id')]
self.assertIsInstance(projected, CollectionExpr)
self.assertEqual(projected._schema,
Schema.from_lists(['name', 'new_id'], [types.string, types.int64]))
projected = self.expr[[self.expr.name, self.expr.id.astype('string')]]
self.assertIsInstance(projected, ProjectCollectionExpr)
self.assertEqual(projected._schema,
Schema.from_lists(['name', 'id'], [types.string, types.string]))
projected = self.expr.select(self.expr.name, Scalar('abc').rename('word'), size=5)
self.assertIsInstance(projected, ProjectCollectionExpr)
self.assertEqual(projected._schema,
Schema.from_lists(['name', 'word', 'size'],
[types.string, types.string, types.int8]))
self.assertIsInstance(projected._fields[1], StringScalar)
self.assertEqual(projected._fields[1].value, 'abc')
self.assertIsInstance(projected._fields[2], Int8Scalar)
self.assertEqual(projected._fields[2].value, 5)
expr = self.expr[lambda x: x.exclude('id')]
self.assertEqual(expr.schema.names, [n for n in expr.schema.names if n != 'id'])
self.assertRaises(ExpressionError, lambda: self.expr[self.expr.distinct('id', 'fid'), 'name'])
self.assertRaises(ExpressionError, lambda: self.expr[[self.expr.id + self.expr.fid]])
with option_context() as options:
options.interactive = True
self.expr['name', 'id'][[self.expr.name, ]]
self.assertRaises(ExpressionError, lambda: self.expr[self.expr.name])
self.assertRaises(ExpressionError, lambda: self.expr['name', self.expr.groupby('name').id.sum()])
expr = self.expr.filter(self.expr.id < 0)
expr[self.expr.name, self.expr.id]
def testFilter(self):
filtered = self.expr[(self.expr.id < 10) & (self.expr.name == 'test')]
self.assertIsInstance(filtered, FilterCollectionExpr)
filtered = self.expr.filter(self.expr.id < 10, self.expr.name == 'test')
self.assertIsInstance(filtered, FilterCollectionExpr)
def testSlice(self):
sliced = self.expr[:100]
self.assertIsInstance(sliced, SliceCollectionExpr)
self.assertEqual(sliced._schema, self.expr._schema)
self.assertIsInstance(sliced._indexes, tuple)
not_sliced = self.expr[:]
self.assertNotIsInstance(not_sliced, SliceCollectionExpr)
self.assertIsInstance(not_sliced, CollectionExpr)
def testAsType(self):
fid = self.expr.id.astype('float')
self.assertIsInstance(fid._source_data_type, types.Int64)
self.assertIsInstance(fid._data_type, types.Float64)
self.assertIsInstance(fid, Float64SequenceExpr)
self.assertNotIsInstance(fid, Int64SequenceExpr)
int_fid = fid.astype('int')
self.assertIsInstance(int_fid._source_data_type, types.Int64)
self.assertIsInstance(int_fid._data_type, types.Int64)
self.assertIsInstance(int_fid, Int64SequenceExpr)
self.assertNotIsInstance(int_fid, Float64SequenceExpr)
float_fid = (fid + 1).astype('float32')
self.assertIsInstance(float_fid, Float32SequenceExpr)
self.assertNotIsInstance(float_fid, Int32SequenceExpr)
self.assertIsInstance(float_fid, AsTypedSequenceExpr)
def testRename(self):
new_id = self.expr.id.rename('new_id')
self.assertIsInstance(new_id, SequenceExpr)
self.assertEqual(new_id._source_name, 'id')
self.assertEqual(new_id._name, 'new_id')
double_new_id = new_id.rename('2new_id')
self.assertIsInstance(double_new_id, SequenceExpr)
self.assertEqual(double_new_id._source_name, 'id')
self.assertEqual(double_new_id._name, '2new_id')
self.assertIsNot(double_new_id, new_id)
add_id = (self.expr.id + self.expr.fid).rename('add_id')
self.assertIsInstance(add_id, Float64SequenceExpr)
self.assertNotIsInstance(add_id, Int64SequenceExpr)
self.assertIsNone(add_id._source_name)
self.assertIsInstance(add_id, Add)
self.assertEqual(add_id.name, 'add_id')
self.assertIsInstance(add_id._lhs, Int64SequenceExpr)
self.assertIsInstance(add_id._rhs, Float64SequenceExpr)
self.assertEqual(add_id._lhs._source_name, 'id')
self.assertEqual(add_id._rhs._source_name, 'fid')
add_scalar_id = (self.expr.id + 5).rename('add_s_id')
self.assertNotIsInstance(add_scalar_id, Float64SequenceExpr)
self.assertIsInstance(add_scalar_id, Int64SequenceExpr)
self.assertIsInstance(add_scalar_id, Add)
self.assertEqual(add_scalar_id.name, 'add_s_id')
self.assertEqual(add_scalar_id._lhs._source_name, 'id')
def testNewSequence(self):
column = Column(_data_type='int32')
self.assertIn(Int32SequenceExpr, type(column).mro())
self.assertIsInstance(column, Int32SequenceExpr)
column = type(column)._new(_data_type='string')
self.assertNotIn(Int32SequenceExpr, type(column).mro())
self.assertIn(StringSequenceExpr, type(column).mro())
self.assertIsInstance(column, StringSequenceExpr)
self.assertNotIsInstance(column, Int32SequenceExpr)
self.assertIsInstance(column, Column)
seq = SequenceExpr(_data_type='int64')
self.assertIsInstance(seq, Int64SequenceExpr)
seq = BooleanSequenceExpr(_data_type='boolean')
self.assertIsInstance(seq, BooleanSequenceExpr)
seq = DatetimeSequenceExpr(_data_type='float32')
self.assertIsInstance(seq, Float32SequenceExpr)
class Int64Column(Column):
__slots__ = 'test',
column = Int64Column(_data_type='float64', test='value')
self.assertIsInstance(column, Float64SequenceExpr)
self.assertNotIsInstance(column, Int64SequenceExpr)
column = type(column)._new(_data_type='int8', test=column.test)
self.assertEqual(column.test, 'value')
self.assertIsInstance(column, Int8SequenceExpr)
self.assertNotIsInstance(column, Float64SequenceExpr)
self.assertNotIsInstance(column, Int64SequenceExpr)
self.assertIsInstance(column, Int64Column)
class Int64Column(Int64SequenceExpr):
pass
column = Int64Column(_data_type='float64')
self.assertIsInstance(column, Float64SequenceExpr)
self.assertNotIsInstance(column, Int64SequenceExpr)
column = type(column)._new(_data_type='int8')
self.assertIsInstance(column, Int8SequenceExpr)
self.assertNotIsInstance(column, Float64SequenceExpr)
self.assertNotIsInstance(column, Int64SequenceExpr)
self.assertNotIsInstance(column, Int64Column)
def testSequenceCache(self):
df = self.expr.name
self.assertRaises(ExpressionError, lambda: df.cache())
def testExprFieldValidation(self):
df = self.expr
self.assertRaises(errors.ExpressionError, lambda: df[df[:10].id])
df2 = self.expr[['id']]
self.assertRaises(errors.ExpressionError, lambda: df[df2.id])
def testFilterPartition(self):
self.assertRaises(ExpressionError, lambda: self.expr.filter_partition(None))
self.assertRaises(ExpressionError, lambda: self.expr.filter_partition('part1=a/part2=1,part1=b/part2=2'))
self.assertRaises(ExpressionError, lambda: self.expr2.filter_partition('part1/part2=1,part1=b/part2=2'))
filtered1 = self.expr2.filter_partition('part1=a/part2=1,part1=b/part2=2')
self.assertIsInstance(filtered1, FilterPartitionCollectionExpr)
self.assertEqual(filtered1.schema, self.expr.schema)
self.assertEqual(filtered1.predicate_string, 'part1=a/part2=1,part1=b/part2=2')
filtered2 = self.expr2.filter_partition('part1=a/part2=1,part1=b/part2=2', exclude=False)
self.assertIsInstance(filtered2, FilterCollectionExpr)
try:
import pandas as pd
from odps.df import DataFrame
pd_df = pd.DataFrame([['Col1', 1], ['Col2', 2]], columns=['Field1', 'Field2'])
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
from datetime import datetime, timedelta
import re
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas._libs.index as _index
import pandas as pd
from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
import pandas._testing as tm
def test_fancy_getitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s["1/2/2009"] == 48
assert s["2009-1-2"] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[Timestamp(datetime(2009, 1, 2))] == 48
with pytest.raises(KeyError, match=r"^'2009-1-3'$"):
s["2009-1-3"]
tm.assert_series_equal(
s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]
)
def test_fancy_setitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s["1/2/2009"] = -2
assert s[48] == -2
s["1/2/2009":"2009-06-05"] = -3
assert (s[48:54] == -3).all()
def test_dti_reset_index_round_trip():
dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype("M8[ns]")
d3 = d2.set_index("index")
tm.assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
df = df.set_index("Date")
assert df.index[0] == stamp
assert df.reset_index()["Date"][0] == stamp
@pytest.mark.slow
def test_slice_locs_indexerror():
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]
s = Series(range(100000), times)
s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
def test_slicing_datetimes():
# GH 7523
# unique
df = DataFrame(
np.arange(4.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
# duplicates
df = DataFrame(
np.arange(5.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_datetime_tz_pytz():
from pytz import timezone as tz
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz("US/Central").localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil():
from dateutil.tz import tzutc
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = (
lambda x: tzutc() if x == "UTC" else gettz(x)
) # handle special case for utc in dateutil
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="America/New_York")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex():
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
# GH#18435 strings get a pass from tzawareness compat
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
lb = "1990-01-01 04:00:00-0500"
rb = "1990-01-01 07:00:00-0500"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# But we do not give datetimes a pass on tzawareness compat
# TODO: do the same with Timestamps and dt64
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
naive = datetime(1990, 1, 1, 4)
with tm.assert_produces_warning(FutureWarning):
# GH#36148 will require tzawareness compat
result = ts[naive]
expected = ts[4]
assert result == expected
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = ts[4]
tm.assert_series_equal(result, ts)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
msg = r"Invalid comparison between dtype=datetime64\[ns, US/Eastern\] and datetime"
with pytest.raises(TypeError, match=msg):
# tznaive vs tzaware comparison is invalid
# see GH#18376, GH#18162
ts[(ts.index >= lb) & (ts.index <= rb)]
lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)
rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_periodindex():
from pandas import period_range
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
def test_datetime_indexing():
index = date_range("1/1/2000", "1/7/2000")
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp("1/8/2000")
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
"""
test duplicates in time series
"""
@pytest.fixture
def dups():
dates = [
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
return Series(np.random.randn(len(dates)), index=dates)
def test_constructor(dups):
assert isinstance(dups, Series)
assert isinstance(dups.index, DatetimeIndex)
def test_is_unique_monotonic(dups):
assert not dups.index.is_unique
def test_index_unique(dups):
uniques = dups.index.unique()
expected = DatetimeIndex(
[
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
)
assert uniques.dtype == "M8[ns]" # sanity
tm.assert_index_equal(uniques, expected)
assert dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = dups.index.tz_localize("US/Eastern")
dups_local.name = "foo"
result = dups_local.unique()
expected = DatetimeIndex(expected, name="foo")
expected = expected.tz_localize("US/Eastern")
assert result.tz is not None
assert result.name == "foo"
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_duplicate_dates_indexing(dups):
ts = dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
tm.assert_series_equal(result, expected)
else:
tm.assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
tm.assert_series_equal(cp, expected)
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# assume cwd is ./opensource/
import logging
import os
import socket
import sys
import pandas
sys.path.append(os.getcwd())
print(sys.path)
from core.entity.common.machineinfo import MachineInfo
from demos.random_forest.coordinator import RandomForestCoordinator
from demos.random_forest.client import RandomForestClient
# load and align train data
g1 = pandas.read_csv("data/classificationA/train0.csv")
g2 = pandas.read_csv("data/classificationA/train1.csv")
g3 = pandas.read_csv("data/classificationA/train2.csv")
uid = g1.loc[:, ["uid"]]
uid = pandas.merge(uid, g2.loc[:, ["uid"]], on="uid", how="inner")
import numpy as np
import pandas as pd
from numba import njit
import pytest
from vectorbt import defaults
from vectorbt.utils import checks, config, decorators, math, array
from tests.utils import hash
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
# go deeper
conf['b']['c'] = 2
with pytest.raises(Exception) as e_info:
conf['b']['d'] = 2
def test_merge_kwargs(self):
assert config.merge_kwargs({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_kwargs({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_kwargs({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_kwargs({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
class G:
@decorators.cached_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
# general caching
cached_number = g.cache_me
assert g.cache_me == cached_number
# clear_cache method
G.cache_me.clear_cache(g)
cached_number2 = g.cache_me
assert cached_number2 != cached_number
assert g.cache_me == cached_number2
# disabled locally
G.cache_me.disabled = True
cached_number3 = g.cache_me
assert cached_number3 != cached_number2
assert g.cache_me != cached_number3
G.cache_me.disabled = False
# disabled globally
defaults.caching = False
cached_number4 = g.cache_me
assert cached_number4 != cached_number3
assert g.cache_me != cached_number4
defaults.caching = True
def test_cached_method(self):
class G:
@decorators.cached_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform() * 10
g = G()
# general caching
cached_number = g.cache_me()
assert g.cache_me() == cached_number
# clear_cache method
G.cache_me.clear_cache(g)
cached_number2 = g.cache_me()
assert cached_number2 != cached_number
assert g.cache_me() == cached_number2
# disabled locally
G.cache_me.disabled = True
cached_number3 = g.cache_me()
assert cached_number3 != cached_number2
assert g.cache_me() != cached_number3
G.cache_me.disabled = False
# disabled globally
defaults.caching = False
cached_number4 = g.cache_me()
assert cached_number4 != cached_number3
assert g.cache_me() != cached_number4
defaults.caching = True
# disabled by non-hashable args
cached_number5 = g.cache_me(b=np.zeros(1))
assert cached_number5 != cached_number4
assert g.cache_me(b=np.zeros(1)) != cached_number5
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_assert_value_in(self):
checks.assert_value_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_value_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type(pd.Series([1, 2, 3]), (np.ndarray, pd.Series))
with pytest.raises(Exception) as e_info:
checks.assert_type(pd.DataFrame([1, 2, 3]), (np.ndarray, pd.Series))
def test_assert_subclass(self):
class A:
pass
class B(A):
pass
class C(B):
pass
checks.assert_subclass(B, A)
checks.assert_subclass(C, B)
checks.assert_subclass(C, A)
with pytest.raises(Exception) as e_info:
checks.assert_subclass(A, B)
def test_assert_same_type(self):
checks.assert_same_type(0, 1)
checks.assert_same_type(np.zeros(1), np.empty(1))
with pytest.raises(Exception) as e_info:
checks.assert_type(0, np.zeros(1))
def test_assert_dtype(self):
checks.assert_dtype(np.zeros(1), np.float)
checks.assert_dtype(pd.Series([1, 2, 3]), np.int)
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), np.int)
with pytest.raises(Exception) as e_info:
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.int)
def test_assert_subdtype(self):
checks.assert_subdtype([0], np.number)
checks.assert_subdtype(np.array([1, 2, 3]), np.number)
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.number)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(np.array([1, 2, 3]), np.float)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.float)
def test_assert_same_dtype(self):
checks.assert_same_dtype([1], [1, 1, 1])
checks.assert_same_dtype(pd.Series([1, 2, 3]), pd.DataFrame([[1, 2, 3]]))
checks.assert_same_dtype(pd.DataFrame([[1, 2, 3.]]), pd.DataFrame([[1, 2, 3.]]))
with pytest.raises(Exception) as e_info:
checks.assert_same_dtype(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3.]]))
def test_assert_ndim(self):
checks.assert_ndim(0, 0)
checks.assert_ndim(np.zeros(1), 1)
checks.assert_ndim(pd.Series([1, 2, 3]), (1, 2))
checks.assert_ndim(pd.DataFrame([1, 2, 3]), (1, 2))
with pytest.raises(Exception) as e_info:
checks.assert_ndim(np.zeros((3, 3, 3)), (1, 2))
def test_assert_same_len(self):
checks.assert_same_len([[1]], [[2]])
checks.assert_same_len([[1]], [[2, 3]])
with pytest.raises(Exception) as e_info:
checks.assert_same_len([[1]], [[2], [3]])
def test_assert_same_shape(self):
checks.assert_same_shape(0, 1)
checks.assert_same_shape([1, 2, 3], np.asarray([1, 2, 3]))
checks.assert_same_shape([1, 2, 3], pd.Series([1, 2, 3]))
checks.assert_same_shape(np.zeros((3, 3)), pd.Series([1, 2, 3]), axis=0)
checks.assert_same_shape(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(1, 0))
with pytest.raises(Exception) as e_info:
checks.assert_same_shape(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(0, 1))
def test_assert_same_index(self):
index = ['a', 'b', 'c']
checks.assert_same_index(pd.Series([1, 2, 3], index=index), pd.DataFrame([1, 2, 3], index=index))
with pytest.raises(Exception) as e_info:
checks.assert_same_index(pd.Series([1, 2, 3]), pd.DataFrame([1, 2, 3], index=index))
def test_assert_same_columns(self):
columns = ['a', 'b', 'c']
checks.assert_same_columns(pd.DataFrame([[1, 2, 3]], columns=columns), pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_same_columns(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3]], columns=columns))
def test_assert_same_meta(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_same_meta(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_same_meta(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
import pandas
import os
import re
import numpy as np
import math
import warnings
from modin.error_message import ErrorMessage
from modin.engines.base.io import BaseIO
from modin.data_management.utils import compute_chunksize
from modin import __execution_engine__
if __execution_engine__ == "Ray":
import ray
PQ_INDEX_REGEX = re.compile("__index_level_\d+__") # noqa W605
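# PQ_INDEX_REGEX matches the column names pandas generates when it serializes an
# index into parquet, e.g. "__index_level_0__" or "__index_level_12__", so those
# columns can be filtered out of the user-visible column list during reads.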
class RayIO(BaseIO):
frame_partition_cls = None
query_compiler_cls = None
frame_cls = None
# IMPORTANT NOTE
#
# Specify these in the child classes to extend the functionality from this class.
# The tasks must return a very specific set of objects in the correct order to be
# correct. The following must be returned from these remote tasks:
# 1.) A number of partitions equal to the `num_partitions` value. If there is not
# enough data to fill the number of partitions, returning empty partitions is
# okay as well.
# 2.) The index object if the index is anything but the default type (`RangeIndex`),
# otherwise return the length of the object in the remote task and the logic
# will build the `RangeIndex` correctly. Many of these methods have an `index_col`
# parameter that will tell you whether or not to use the default index.
read_parquet_remote_task = None
# For reading parquet files in parallel, this task should read based on the `cols`
# value in the task signature. Each task will read a subset of the columns.
#
# Signature: (path, cols, num_splits, kwargs)
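# Illustration only -- a conforming implementation could look roughly like the
# hypothetical sketch below (the function name is a placeholder, not part of this
# class, and the row-splitting helper is an assumption rather than modin's own):
#
#   @ray.remote
#   def _read_parquet_columns(path, cols, num_splits, kwargs):
#       import pyarrow.parquet as pq
#       df = pq.read_table(path, columns=cols, **kwargs).to_pandas()
#       chunks = np.array_split(df, num_splits)     # first num_splits objects: partitions
#       return chunks + [len(df.index), df.dtypes]  # then the length and the dtypes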
read_json_remote_task = None
# For reading JSON files and other text files in parallel, this task should read
# based on the offsets in the signature (`start` and `stop` are byte offsets).
#
# Signature: (filepath, num_splits, start, stop, kwargs)
read_hdf_remote_task = None
# For reading HDF5 files in parallel, this task should read based on the `columns`
# parameter in the task signature. Each task will read a subset of the columns.
#
# Signature: (path_or_buf, columns, num_splits, kwargs)
read_feather_remote_task = None
# For reading Feather file format in parallel, this task should read based on the
# `columns` parameter in the task signature. Each task will read a subset of the
# columns.
#
# Signature: (path, columns, num_splits)
read_sql_remote_task = None
# For reading SQL tables in parallel, this task should read a number of rows based
# on the `sql` string passed to the task. Each task will be given a different LIMIT
# and OFFSET as a part of the `sql` query string, so the tasks should perform only
# the logic required to read the SQL query and determine the Index (information
# above).
#
# Signature: (num_splits, sql, con, index_col, kwargs)
@classmethod
def read_parquet(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a DataFrame.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the parquet file.
We only support local files for now.
engine: Ray only supports the pyarrow reader.
This argument doesn't do anything for now.
kwargs: Pass into parquet's read_pandas function.
Notes:
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile, ParquetDataset
if cls.read_parquet_remote_task is None:
return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs)
file_path = path
if os.path.isdir(path):
directory = True
partitioned_columns = set()
# We do a tree walk of the path directory because partitioned
# parquet directories have a unique column at each directory level.
# Thus, we can use os.walk(), which does a dfs search, to walk
# through the different columns that the data is partitioned on
for (root, dir_names, files) in os.walk(path):
if dir_names:
partitioned_columns.add(dir_names[0].split("=")[0])
if files:
# Metadata files, git files, .DSStore
if files[0][0] == ".":
continue
file_path = os.path.join(root, files[0])
break
partitioned_columns = list(partitioned_columns)
else:
directory = False
if not columns:
if directory:
# Path of the sample file that we will read to get the remaining
# columns.
from pyarrow import ArrowIOError
try:
pd = ParquetDataset(file_path)
except ArrowIOError:
pd = ParquetDataset(path)
column_names = pd.schema.names
else:
pf = ParquetFile(path)
column_names = pf.metadata.schema.names
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
# Cannot read in parquet file by only reading in the partitioned column.
# Thus, we have to remove the partition columns from the columns to
# ensure that when we do the math for the blocks, the partition column
# will be read in along with a non partition column.
if columns and directory and any(col in partitioned_columns for col in columns):
columns = [col for col in columns if col not in partitioned_columns]
# If all of the columns wanted are partition columns, return an
# empty dataframe with the desired columns.
if len(columns) == 0:
return cls.from_pandas(pandas.DataFrame(columns=partitioned_columns))
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
column_widths = [len(c) for c in col_partitions]
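# Worked example with illustrative numbers: 10 columns and 4 partitions give
# column_splits = 10 // 4 + 1 = 3, so col_partitions holds chunks of 3, 3, 3 and 1
# column names and column_widths == [3, 3, 3, 1].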
# Each item in this list will be a list of columns of original df
# partitioned to smaller pieces along rows.
# We need to transpose the oids array to fit our schema.
# TODO (williamma12): This part can be parallelized even more if we
# separate the partitioned parquet file code path from the default one.
# The workers return multiple objects for each part of the file read:
# - The first n - 2 objects are partitions of data
# - The n - 1 object is the length of the partition.
# - The nth object is the dtypes of the partition. We combine these to
# form the final dtypes below.
blk_partitions = np.array(
[
cls.read_parquet_remote_task._remote(
args=(path, cols + partitioned_columns, num_splits, kwargs),
num_return_vals=num_splits + 2,
)
if directory and cols == col_partitions[len(col_partitions) - 1]
else cls.read_parquet_remote_task._remote(
args=(path, cols, num_splits, kwargs),
num_return_vals=num_splits + 2,
)
for cols in col_partitions
]
).T
# Metadata
index_len = ray.get(blk_partitions[-2][0])
index = pandas.RangeIndex(index_len)
index_chunksize = compute_chunksize(
pandas.DataFrame(index=index)
import numpy as np
import pandas as pd
import pytest
from sklearn import metrics
from epiquark import ScoreCalculator
def test_non_case_imputation(shared_datadir, paper_example_score: ScoreCalculator) -> None:
cases = pd.read_csv(shared_datadir / "paper_example/cases_long.csv")
imputed = paper_example_score._impute_non_case(cases)
imputed_expected = pd.read_csv(shared_datadir / "paper_example/non_case_imputed_long.csv")
pd.testing.assert_frame_equal(imputed, imputed_expected, check_dtype=False)
def test_p_di_given_x(shared_datadir, paper_example_score: ScoreCalculator) -> None:
p_di_given_x = paper_example_score._p_di_given_x()
p_di_given_x_expected = pd.read_csv(shared_datadir / "paper_example/p_di_given_x.csv")
pd.testing.assert_frame_equal(p_di_given_x, p_di_given_x_expected, check_dtype=False)
def test_p_sj_given_x(shared_datadir, paper_example_score: ScoreCalculator) -> None:
p_sj_given_x = paper_example_score._p_sj_given_x()
p_sj_given_x_expected = pd.read_csv(shared_datadir / "paper_example/p_sj_given_x_long.csv")
pd.testing.assert_frame_equal(p_sj_given_x, p_sj_given_x_expected, check_dtype=False)
def test_p_di_given_sj(shared_datadir, paper_example_score: ScoreCalculator) -> None:
p_di_given_sj_x = paper_example_score._p_di_given_sj()
p_di_given_sj_x_expected = pd.read_csv(shared_datadir / "paper_example/p_di_given_sj.csv")
str_cols = list(p_di_given_sj_x.select_dtypes(exclude="number").columns)
pd.testing.assert_frame_equal(
p_di_given_sj_x.sort_values(by=str_cols).reset_index(drop=True),
p_di_given_sj_x_expected.sort_values(by=str_cols).reset_index(drop=True),
check_dtype=False,
)
def test_p_hat_di(shared_datadir, paper_example_score: ScoreCalculator) -> None:
p_hat_di = (
paper_example_score._p_hat_di().sort_values(by=["x1", "x2", "d_i"]).reset_index(drop=True)
)
p_hat_di_expected = (
pd.read_csv(shared_datadir / "paper_example/p_hat_di.csv")
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re # regular expressions library
import string # character string library
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends different animal names in Spanish and English to a dictionary.
The English names are added in the form in which they appear as
WordNet synsets.
Parameters
----------
df_translate : pandas.dataframe, optional.
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where to save the pickle file with the dictionary. Unless
path is empty.
The default is "".
Returns
-------
df_translate : pandas.dataframe.
Pandas.dataframe with the new rows appended.
'''
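# Hypothetical usage sketch (the pickle path is an example, not a real file):
#   nlp = NLPClass()
#   animals = nlp.translations_dictionary(path="animal_translations.pkl")
#   animals.loc[animals["spanish"] == "yacaré", "english"]  # -> "alligator"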
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
import uuid
import traceback
import os
import numpy as np
import pandas
import nrrd
import glob
import argparse
import random
from PIL import Image
import csv
from shutil import rmtree
from collections import defaultdict
from keras.preprocessing.image import ImageDataGenerator, Iterator
from keras.utils import to_categorical
from tqdm import tqdm
import matplotlib.pyplot as plt
from segmentation import calculate_percentile_slice, select_slice, bounding_box, crop, resize, calculate_volume, calculate_top3_slices
from config import config
from filenames import IMAGE, SEGMENTATION, T1, T2, T1C
clinical_features = [
"age",
"sex",
"location"
]
def all_input(t1, t2, t1c, features, labels):
t1_image = np.array(t1)
t1_image = np.rollaxis(t1_image, 0, 3)
t2_image = np.array(t2)
t2_image = np.rollaxis(t2_image, 0, 3)
t1c_image = np.array(t1c)
t1c_image = np.rollaxis(t1c_image, 0, 3)
return (t1_image, t2_image, t1c_image), features, labels
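# Note on all_input above: load_image() yields three (H, W) slices, np.array()
# stacks them to (3, H, W), and np.rollaxis(..., 0, 3) moves the slice axis to the
# end, producing the channels-last (H, W, 3) layout the Keras generators expect.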
def t1_input(t1, t2, t1c, features, labels):
t1_image = np.array(t1)
t1_image = np.rollaxis(t1_image, 0, 3)
return (t1_image, None, None), [], labels
def t1c_input(t1, t2, t1c, features, labels):
t1c_image = np.array(t1c)
t1c_image = np.rollaxis(t1c_image, 0, 3)
return (None, None, t1c_image), [], labels
def t2_input(t1, t2, t1c, features, labels):
t2_image = np.array(t2)
t2_image = np.rollaxis(t2_image, 0, 3)
return (None, t2_image, None), [], labels
def t1_t2_input(t1, t2, t1c, features, labels):
t1_image = np.array(t1)
t1_image = np.rollaxis(t1_image, 0, 3)
t2_image = np.array(t2)
t2_image = np.rollaxis(t2_image, 0, 3)
return (t1_image, t2_image, None), [], labels
def t1c_t2_input(t1, t2, t1c, features, labels):
t1c_image = np.array(t1c)
t1c_image = np.rollaxis(t1c_image, 0, 3)
t2_image = np.array(t2)
t2_image = np.rollaxis(t2_image, 0, 3)
return (None, t2_image, t1c_image), [], labels
def t1_features_input(t1, t2, t1c, features, labels):
t1_image = np.array(t1)
t1_image = np.rollaxis(t1_image, 0, 3)
return (t1_image, None, None), features, labels
def t2_features_input(t1, t2, t1c, features, labels):
t2_image = np.array(t2)
t2_image = np.rollaxis(t2_image, 0, 3)
return (None, t2_image, None), features, labels
def features_input(t1, t2, t1c, features, labels):
return (None, None, None), features, labels
def t1c_features_input(t1, t2, t1c, features, labels):
t1c_image = np.array(t1c)
t1c_image = np.rollaxis(t1c_image, 0, 3)
return (None, None, t1c_image), features, labels
INPUT_FORMS = {
"all": all_input,
"t1": t1_input,
"t2": t2_input,
"t1c": t1c_input,
"t1-t2": t1_t2_input,
"t1c-t2": t1c_t2_input,
"t1-features": t1_features_input,
"t2-features": t2_features_input,
"t1c-features": t1c_features_input,
"features": features_input,
}
INPUT_FORM_PARAMETERS = {
"all": {
"t1": True,
"t2": True,
"t1c": True,
"features": True,
},
"t1": {
"t1": True,
"t2": False,
"t1c": False,
"features": False,
},
"t2": {
"t1": False,
"t2": True,
"t1c": False,
"features": False,
},
"t1c": {
"t1": False,
"t2": False,
"t1c": True,
"features": False,
},
"t1-t2": {
"t1": True,
"t2": True,
"t1c": False,
"features": False,
},
"t1c-t2": {
"t1": False,
"t2": True,
"t1c": True,
"features": False,
},
"t1-features": {
"t1": True,
"t2": False,
"t1c": False,
"features": True,
},
"t2-features": {
"t1": False,
"t2": True,
"t1c": False,
"features": True,
},
"t1c-features": {
"t1": False,
"t2": False,
"t1c": True,
"features": True,
},
"features": {
"t1": False,
"t2": False,
"t1c": False,
"features": True,
},
}
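# e.g. INPUT_FORM_PARAMETERS["t1c-t2"] == {"t1": False, "t2": True, "t1c": True, "features": False};
# Dataset and generate_from_features consult this mapping to decide which inputs to load.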
#available = {}
#with open(config.SEQ_AVAIL) as seq_avail:
# reader = csv.reader(seq_avail)
# headers = next(reader, None)
# for h in headers:
# available[h] = []
# for row in reader:
# available['pd'].append(row[1])
# available['t1'].append(row[2])
# available['t1c'].append(row[3])
# available['t2'].append(row[4])
# available['t2-t1'].append(row[5])
# available['t2-t1c'].append(row[6])
class Features(Iterator):
"""
tf.keras.preprocessing.image.Iterator(n, batch_size, shuffle, seed)
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, features, shuffle, seed):
super(Features, self).__init__(len(features), config.BATCH_SIZE, shuffle, hash(seed) % 2**32 )
self.features = np.array(features)
def _get_batches_of_transformed_samples(self, index_array):
return self.features[index_array] #get features of given batch
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
class Dataset(object):
def __init__(self, images, features, labels, names, augment=False, shuffle=False, seed=None, input_form="all", three=False):
self.shuffle = shuffle
self.seed = seed
self.augment = augment
self.input_form = input_form
self.names = names
self.parameters = INPUT_FORM_PARAMETERS[input_form] #ex. {"t1": True, "t2": False, "t1c": False, "features": False}
features = list(zip(*features))
self.labels = labels
self.features = features
self.features_size = 0
self.features_int = []
self.features_list = []
for row in self.features: #ex. [0,1,[0,0,0,1,0,0,0,0,0,0,0,0,0,0]]
new_row_int = []
new_row_list = []
for item in row:
if not isinstance(item, list):
new_row_int.append(item) # [0,43]
else: #if it's a list
new_row_list = item #[0,0,0,1,0,0,0,0,0,0,0,0,0,0]
self.features_int.append(new_row_int) #[[0,43], [1,12],...]
self.features_list.append(new_row_list) #[[0,0,0,1,0,0,0,0,0,0,0,0,0,0],[0,0,0,1,0,0,0,0,0,0,0,0,0,0],...]
if self.parameters["features"]:
self.features_size = len(features[0]) #3 -> [age, sex, location]
self.features_int_generator = Features(self.features_int, self.shuffle, self.seed)
self.features_list_generator = Features(self.features_list, self.shuffle, self.seed)
self.features_generator = Features(self.features, self.shuffle, self.seed)
self.n = len(labels)
unique, index, inverse, counts = np.unique(self.labels, return_index=True, return_inverse=True, return_counts=True)
self.y = inverse
if three:
self.y = to_categorical(inverse)
self.classes = inverse
self.class_indices = { u: i for i, u in enumerate(unique) }
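# e.g. labels of ["benign", "malignant"] give class_indices {"benign": 0, "malignant": 1},
# with self.y holding the integer codes (or their one-hot form when three=True).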
separate_images = list(zip(*images))
if self.parameters["t1"]:
self.t1 = np.array(separate_images[0])
self.datagen1 = self._get_data_generator()
if self.parameters["t2"]:
self.t2 = np.array(separate_images[1])
self.datagen2 = self._get_data_generator()
if self.parameters["t1c"]:
self.t1c = np.array(separate_images[2])
self.datagen1c = self._get_data_generator()
self.reset()
def __len__(self):
return self.n
def __iter__(self):
return self
def __next__(self):
return self.next()
def reset(self):
if self.parameters["features"]:
self.features_int_generator = Features(self.features_int, self.shuffle, self.seed)
self.features_list_generator = Features(self.features_list, self.shuffle, self.seed)
self.features_generator = Features(self.features, self.shuffle, self.seed)
if self.parameters["t1"]:
self.generator_t1 = self.datagen1.flow(
x=self.t1,
y=self.y,
batch_size=config.BATCH_SIZE,
shuffle=self.shuffle,
seed=hash(self.seed) % 2**32,
)
if self.parameters["t2"]:
self.generator_t2 = self.datagen2.flow(
x=self.t2,
y=self.y,
batch_size=config.BATCH_SIZE,
shuffle=self.shuffle,
seed=hash(self.seed) % 2**32,
)
if self.parameters["t1c"]:
self.generator_t1c = self.datagen1c.flow(
x=self.t1c,
y=self.y,
batch_size=config.BATCH_SIZE,
shuffle=self.shuffle,
seed=hash(self.seed) % 2**32,
)
self.labels_generator = Features(self.y, self.shuffle, self.seed)
def _get_data_generator(self):
if self.augment:
return ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
)
return ImageDataGenerator(
rescale=1. / 255,
)
def next(self):
labels = self.labels_generator.next()
inputs = list()
if self.parameters["t2"]:
inputs.append(self.generator_t2.next()[0])
if self.parameters["t1"]:
inputs.append(self.generator_t1.next()[0])
if self.parameters["t1c"]:
inputs.append(self.generator_t1c.next()[0])
if self.parameters["features"]:
inputs.append([self.features_int_generator.next(),self.features_list_generator.next()])
#inputs.append(self.features_generator.next())
if len(inputs) == 1:
inputs = inputs[0]
return (inputs, labels)
def get_names(self):
return self.names
def outcome_feature(row, label):
label = row[label]
features = [ row[f] for f in clinical_features ]
return label, features #(label, [age, sex, location]) -> ex. (1, [32, 0, [0,0,0,0,1,0,0,0,0,0,0,0,0,0]])
LABEL_FORMS = {
"outcome_3": outcome_feature,
"outcome_pos": outcome_feature,
"outcome_neg": outcome_feature
}
def get_label_features(row, label="outcome"):
"""returns label, features, sample name"""
return (*LABEL_FORMS[label](row, label), row.name) #(outcome_feature(row, label), row.name) -> (label, features, patientID)
def input_data_form(t1, t2, t1c, features, labels, input_form=config.INPUT_FORM):
images, features, labels = INPUT_FORMS[input_form](t1, t2, t1c, features, labels) #ex. t2_input(t1, t2, t1c, features, labels)
#ex. for t2 input -> (None, t2_image, None), [], labels
#ex. for features input -> (None, None, None), features, labels
return images, features, labels
def load_image(image_path, segmentation_path, verbose=False):
image, _ = nrrd.read(image_path) #import image nrrd
segmentation, _ = nrrd.read(segmentation_path) #import segmentation nrrd
if verbose:
print("""
image: {}
seg: {}
""".format(image.shape, segmentation.shape))
return [mask_image_percentile(image, segmentation, 100, a) for a in (0, 1, 2)] #3 masked images, 1 for each axis. changed to use the "actual" image on each channel, cuts lack appropriate resolution
#return multicut_1axis_mask(image, segmentation, axis=2)
def mask_image_percentile(image, segmentation, percentile=100, axis=2):
if image.ndim == 4:
print("4dim")
image = image[0]
plane = calculate_percentile_slice(segmentation, percentile, axis) #find largest slice
image, segmentation = select_slice(image, segmentation, plane, axis) #select the largest slide
bounds = bounding_box(segmentation) #create a bounding box the size of the segmentation
image, segmentation = crop(image, segmentation, bounds) #crop to size of the segmentation
masked = image * segmentation #mask everything in image except what was segmented
masked = resize(masked, (config.IMAGE_SIZE, config.IMAGE_SIZE)) #resize to desired image size
#plt.imsave("data_gen_images/" + str(uuid.uuid4()) + ".png", masked)
#print(np.isnan(masked).any())
return masked
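# Shape walk-through for mask_image_percentile with illustrative sizes: a
# (512, 512, 40) volume with axis=2 is reduced to its largest segmented plane
# (512, 512), cropped to the lesion bounding box, zeroed outside the mask, and
# resized to (config.IMAGE_SIZE, config.IMAGE_SIZE) before being used as one channel.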
def multicut_1axis_mask(image, segmentation, axis=2):
plane1, plane2, plane3 = calculate_top3_slices(segmentation, axis) #find largest slice
if image.ndim == 4:
print("4dim")
image = image[0]
image1, segmentation1 = select_slice(image, segmentation, plane1, axis) #select the largest slice
bounds = bounding_box(segmentation1) #create a bounding box the size of the segmentation
image1, segmentation1 = crop(image1, segmentation1, bounds) #crop to size of the segmentation
masked1 = image1 * segmentation1 #mask everything in image except what was segmented
masked1 = resize(masked1, (config.IMAGE_SIZE, config.IMAGE_SIZE)) #resize to desired image size
image2, segmentation2 = select_slice(image, segmentation, plane2, axis) #select the second largest slice
bounds = bounding_box(segmentation2) #create a bounding box the size of the segmentation
image2, segmentation2 = crop(image2, segmentation2, bounds) #crop to size of the segmentation
masked2 = image2 * segmentation2 #mask everything in image except what was segmented
masked2 = resize(masked2, (config.IMAGE_SIZE, config.IMAGE_SIZE)) #resize to desired image size
image3, segmentation3 = select_slice(image, segmentation, plane3, axis) #select the third largest slice
bounds = bounding_box(segmentation3) #create a bounding box the size of the segmentation
image3, segmentation3 = crop(image3, segmentation3, bounds) #crop to size of the segmentation
masked3 = image3 * segmentation3 #mask everything in image except what was segmented
masked3 = resize(masked3, (config.IMAGE_SIZE, config.IMAGE_SIZE)) #resize to desired image size
#plt.imsave("/Volumes/external/datagentest.png", masked1)
return [masked1, masked2, masked3]
SHAPES_OUTPUT = """
SHAPES
{}:"""
def generate_from_features(df, input_form=config.INPUT_FORM, label_form="outcome", verbose=False, source=config.PREPROCESSED_DIR):
#determine which features you need to get
#ex. {"t1": True, "t2": False, "t1c": False, "features": False}
parameters = INPUT_FORM_PARAMETERS[input_form]
for index, row in tqdm(df.iterrows(), total=len(df)): #for each patient
print(index)
t1_image_file = os.path.join(source, "{}-{}-{}".format(index, T1, IMAGE))
t1_seg_file = os.path.join(source, "{}-{}-{}".format(index, T1, SEGMENTATION))
t2_image_file = os.path.join(source, "{}-{}-{}".format(index, T2, IMAGE))
t2_seg_file = os.path.join(source, "{}-{}-{}".format(index, T2, SEGMENTATION))
t1c_image_file = os.path.join(source, "{}-{}-{}".format(index, T1C, IMAGE))
t1c_seg_file = os.path.join(source, "{}-{}-{}".format(index, T1C, SEGMENTATION))
t1_masked = None
t2_masked = None
t1c_masked = None
if parameters["t1"]:
try:
print("I'm doing the thing.")
if verbose:
print(SHAPES_OUTPUT.format("t1"))
t1_masked = load_image(t1_image_file, t1_seg_file, verbose=verbose)
#t1c_masked = load_image(t1c_image_file, t1c_seg_file, verbose=verbose)
except Exception as e:
print()
print("#" * 80)
print("Exception occurred for: {}\n{}".format(row, e))
print("T1 image unavailable")
print(traceback.format_exc())
continue
if parameters["t2"]:
try:
print("I'm doing the thing.")
if verbose:
print(SHAPES_OUTPUT.format("t2"))
t2_masked = load_image(t2_image_file, t2_seg_file, verbose=verbose)
#t1c_masked = load_image(t1c_image_file, t1c_seg_file, verbose=verbose)
print("I'm doing the thing.")
#if verbose:
# print(SHAPES_OUTPUT.format("t1c"))
#t1c_masked = load_image(t1c_image_file, t1c_seg_file, verbose=verbose)
except Exception as e:
print()
print("#" * 80)
print("Exception occurred for: {}\n{}".format(row, e))
print("T2 image unavailable")
print(traceback.format_exc())
continue
if parameters["t1c"]:
try:
print("I'm doing the thing.")
if verbose:
print(SHAPES_OUTPUT.format("t1c"))
#t1c_masked = load_image(t1c_image_file, t1c_seg_file, verbose=verbose)
except Exception as e:
print()
print("#" * 80)
print("Exception occurred for: {}\n{}".format(row, e))
print("T1C image unavailable")
print(traceback.format_exc())
continue
# if parameters["features"]:
# try:
# print("I'm doing the thing.")
# if verbose:
# print(SHAPES_OUTPUT.format("t1c"))
# t1c_masked = load_image(t1c_image_file, t1c_seg_file, verbose=verbose)
# except Exception as e:
# print()
# print("#" * 80)
# print("Exception occurred for: {}\n{}".format(row, e))
# print("T1C image unavailable")
# print(traceback.format_exc())
# continue
#ex. (1, [32, 0, [0,0,0,0,1,0,0,0,0,0,0,0,0,0]], "bone-penn-453")
labels, features, name = get_label_features(row, label=label_form)
#ex. for t2 input -> (None, t2_image, None), [], labels
#ex. for features input -> (None, None, None), features, labels
images, features, labels = input_data_form(t1_masked, t2_masked, t1c_masked, features, labels, input_form=input_form)
#ex. (None, t2_image, None), [], labels, "bone-penn-453"
yield images, features, labels, name
def sort(validation_fraction=0.2, test_fraction=0.1, seed=None, label_form="outcome", input_form="all"):
f = pandas.read_pickle(config.FEATURES) #pickle with all training features for all available patients
# print(f)
# print(len(f))
# f = f.drop('bone-penn-479')
# print(len(f))
train_fraction = 1 - validation_fraction - test_fraction #fraction of points going to training set
#filter data set to include only patients with desired studies available
#input_form_map = {
# "all": lambda f: f[f.index.isin(available['t1']).isin(available['t2']).isin(available['t1c'])],
# "t1": lambda f: f[f.index.isin(available['t2-t1'])], #only using patients with T1 and T2
# "t2": lambda f: f[f.index.isin(available['t2-t1'])], #only using patients with T1 and T2
# "t1c": lambda f: f[f.index.isin(available['t1c'])],
# "t2-t1": lambda f: f[f.index.isin(available['t2-t1'])],
# "t1c-t2": lambda f: f[f.index.isin(available['t1c']) & f.index.isin(available['t2'])],
# "t1-features": lambda f: f[f.index.isin(available['t1'])],
# "t2-features": lambda f: f[f.index.isin(available['t2'])],
# "t1c-features": lambda f: f[f.index.isin(available['t1c'])],
# "features": lambda f: f
# }
#f = input_form_map[input_form](f) #CSV with all training features for patients with given imaging modality
remaining = f.copy()
sort_dict = {
"train": train_fraction,
"validation": validation_fraction,
"test": test_fraction,
}
# calculate goal numbers for train/validation/test by label properties
labels = f[label_form].unique() # 1 (malignant) or 0 (benign)
goal_sort = dict()
for l in labels:
label_fraction = len(remaining[remaining[label_form] == l])/len(remaining) # no. pts. with given label/total no. of pts. = % benign (ex.)
for s in ["train", "validation", "test"]:
goal_sort[(l, s)] = int(len(remaining) * label_fraction * sort_dict[s]) #ex. goal_sort[(benign, train)] = total no. of pts. * %benign * %train
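# e.g. with illustrative numbers: 200 patients, 60% benign, train fraction 0.7
# -> goal_sort[(benign, "train")] = int(200 * 0.6 * 0.7) = 84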
all_train = list()
all_validation = list()
all_test = list()
sorted_dict = {
"train": all_train,
"validation": all_validation,
"test": all_test,
}
# get preassigned sorts
train = f[f["sort"] == "train"] #all patients pre-assigned to training
validation = f[f["sort"] == "validation"] #all patients pre-assigned to validation
test = f[f["sort"] == "test"] #all patients pre-assigned to testing
presort_dict = {
"train": train,
"validation": validation,
"test": test,
}
# recalculate goals based on preassigned sorts
for s in ["train", "validation", "test"]:
presorted = presort_dict[s]
for l in labels:
goal_sort[(l, s)] = max(0, goal_sort[(l, s)] - len(presorted[presorted[label_form] == l]))
# add preassigned sorts and remove from lesions to sort
all_train.append(train)
all_validation.append(validation)
all_test.append(test)
remaining = remaining.drop(train.index)
remaining = remaining.drop(validation.index)
remaining = remaining.drop(test.index)
# sort remaining lesions
for l in labels:
for s in ["train", "validation", "test"]:
label_set = remaining[remaining[label_form] == l]
label_set = label_set.sample(n = min(goal_sort[(l, s)], len(label_set)), random_state=(int(seed) % 2 ** 32))
remaining = remaining.drop(label_set.index)
sorted_dict[s].append(label_set)
# append any left over to training set
all_train.append(remaining)
train = pandas.concat(all_train)
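# Illustrative arithmetic for the goal_sort computation above (hypothetical numbers):
# with 100 lesions, a 60% benign label fraction and validation_fraction = 0.2,
# goal_sort[(benign, "validation")] = int(100 * 0.6 * 0.2) = 12 lesions.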
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
"""
#%matplotlib inline
import numpy as np
import pandas as pd
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
RESIZE_SPACING = [2,2,2] # z, y, x (x & y MUST be the same)
RESOLUTION_STR = "2x2x2"
img_rows = 448
img_cols = 448 # global values
DO_NOT_USE_SEGMENTED = True
#STAGE = "stage1"
STAGE_DIR_BASE = "../input/%s/" # on one cluster we had input_shared
LUNA_MASKS_DIR = "../luna/data/original_lung_masks/"
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
# Load the scans in given folder path (loads the most recent acquisition)
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
#slices.sort(key = lambda x: int(x.InstanceNumber))
acquisitions = [x.AcquisitionNumber for x in slices]
vals, counts = np.unique(acquisitions, return_counts=True)
vals = vals[::-1] # reverse order so the later acquisitions are first (np.unique seems to always return them ordered 1, 2, etc.)
counts = counts[::-1]
## take the acquistions that has more entries; if these are identical take the later entrye
acq_val_sel = vals[np.argmax(counts)]
##acquisitions = sorted(np.unique(acquisitions), reverse=True)
if len(vals) > 1:
print ("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
slices2= [x for x in slices if x.AcquisitionNumber == acq_val_sel]
slices = slices2
## ONE path includes 2 acquisitions (2 sets); take the latter acquisition only, which typically is better than the first/previous ones.
## example of the '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
#slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) # from v 8, BUG should be float
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
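# Minimal usage sketch, assuming stage1 data is present under the STAGE_DIR_BASE layout;
# the helper name below is illustrative and not part of the original pipeline.
def example_load_first_scan(stage="stage1"):
    folders = glob.glob(''.join([STAGE_DIR_BASE % stage, '*']))
    slices = load_scan(folders[0])
    return get_3d_data_slices(slices)  # int16 Hounsfield volume, shape (n_slices, rows, cols)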
def get_3d_data_slices(slices): # get data in Hounsfield Units
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16) # ensure int16 (it may be here uint16 for some images )
image[image == -2000] = 0 # correcting cylindrical bound entries to 0
# Convert to Hounsfield units (HU)
# The intercept is usually -1024
for slice_number in range(len(slices)): # from v 8
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1: # added 16 Jan 2016, evening
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
### Changes/correction - 31.01.2017
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
MARKER_INTERNAL_THRESH = -400
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
fw = MARKER_FRAME_WIDTH # frame width (it looks like 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
#y3 = ydim // 3
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
# Some of the starting code is taken from ArnavJain, since it's more readable than my own
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 #was 7 before final; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterations, i.e. the default of 1, we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
def seperate_lungs_cv2(image): # for increased speed
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#image_size = image.shape[0]
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
#wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watershed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
use_reduce_factor = True
if use_reduce_factor:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dynamically adjust the number of iterations; original was 8
else:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
#outline += ndimage.black_tophat(outline, structure=blackhat_struct) # original slow
#outline1 = outline + (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool)
#outline2 = outline + ndimage.black_tophat(outline, structure=blackhat_struct)
#np.array_equal(outline1,outline2) # True
outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool) # fast
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure2 = morphology.disk(2) # used to fill the gaps/holes close to the border (otherwise the large structure would create a gap by the edge)
if use_reduce_factor:
structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynamically adjust; 5 seems sufficient, we use 7 for safety/just in case
else:
structure3 = morphology.disk(BINARY_CLOSING_SIZE) # dynamically adjust; 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
### NOTE if no iterations, i.e. the default of 1, we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterations, i.e. the default of 1, we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
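# Minimal usage sketch: sorting by the thru-plane position is an alternative to sorting
# by ImagePositionPatient[2] and also handles tilted image orientations.
def sort_slices_thru_plane(slices):
    return sorted(slices, key=thru_plane_position)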
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
return image, new_spacing
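# Minimal usage sketch (illustrative helper, assumes DICOM data on disk): resample one
# patient's HU volume to the module-level target spacing. The achieved spacing can differ
# slightly from RESIZE_SPACING because the new shape is rounded to whole voxels.
def example_resample_scan(path):
    slices = load_scan(path)
    hu = get_3d_data_slices(slices)
    return resample(hu, slices, RESIZE_SPACING)  # (volume, achieved_spacing)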
def segment_all(stage, part=0, processors=1, showSummaryPlot=True): # stage added to simplify the stage1 and stage2 calculations
count = 0
STAGE_DIR = STAGE_DIR_BASE % stage
folders = glob.glob(''.join([STAGE_DIR,'*']))
if len(folders) == 0:
print ("ERROR, check directory, no folders found in: ", STAGE_DIR )
for folder in folders:
count += 1
if count % processors == part: # do this part in this process, otherwise skip
path = folder
slices = load_scan(path)
image_slices = get_3d_data_slices(slices)
#mid = len(image_slices) // 2
#img_sel = mid
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
start = time.time()
resampleImages = True
if resampleImages:
image_resampled, spacing = resample(image_slices, slices, RESIZE_SPACING) # let's start with this small resolution for working out the system (then perhaps 2, 0.667, 0.667)
print("Shape_before_&_after_resampling\t", image_slices.shape,image_resampled.shape)
if useTestPlot:
plt.imshow(image_slices[image_slices.shape[0]//2], cmap=plt.cm.bone)
plt.show()
plt.imshow(image_resampled[image_resampled.shape[0]//2], cmap=plt.cm.bone)
np.max(image_slices)
np.max(image_resampled)
np.min(image_slices)
np.min(image_resampled)
plt.show()
image_slices = image_resampled
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(np.bool)
l_outline = np.zeros(shape).astype(np.bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(np.bool)
l_marker_external = np.zeros(shape).astype(np.bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
# start = time.time()
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
print("Rescale & Seg time, and path: ", ((time.time() - start)), path )
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
path_rescaled = path.replace(stage, ''.join([stage, "_", RESOLUTION_STR]), 1)
path_segmented = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR]), 1)
path_segmented_crop = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR, "_crop"]), 1)
np.savez_compressed (path_rescaled, image_slices)
np.savez_compressed (path_segmented, l_segmented)
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and is a bug when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0 # could be reduced
## have to reduce dx as, for instance, in at least one image the lungs stretch right to the border even without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
crop_max_ratio_z = 0.6 # 0.8 is too big, see make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
mask_volume = zlen*ylen*zlen /(mask_shape[0] * mask_shape[1] * mask_shape[2])
mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something very small)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
# Mask too big, auto-correct
print("The following mask likely too big, autoreducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
if showSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
useSeparatePlots = False
if useSeparatePlots:
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
else:
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(image_slices[img_sel_i],cmap=plt.cm.bone)
ax[1].imshow(l_segmented[img_sel_i],cmap=plt.cm.bone)
plt.show()
# Show some slice in the middle
#plt.imshow(image[image.shape[0] // 2], cmap='gray') # don't show it for simpler review
#plt.show()
np.savez_compressed(path_segmented_crop, image)
#print("Mask count: ", count)
#print ("Shape: ", image.shape)
return part, processors, count
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
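# Round-trip sketch: world -> voxel -> world reproduces the input as long as the world
# coordinates are not below the origin (world_2_voxel takes an absolute value).
def example_world_voxel_roundtrip():
    origin = np.array([-200.0, -150.0, -140.0])   # illustrative values
    spacing = np.array([2.0, 2.0, 2.0])
    world = np.array([-100.0, -50.0, -40.0])
    voxel = world_2_voxel(world, origin, spacing)  # -> [50., 50., 50.]
    return voxel_2_world(voxel, origin, spacing)   # -> back to world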
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
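# Worked example: seq(-3, 3, 2) -> n = round(6/2) = 3 -> [-3, -1, 1, 3] (endpoints included);
# seq(0, 1, 2) -> n = 0 -> [] because n <= 1.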
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
#image = lung_img
#spacing = new_spacing
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get middle x-, y-, and z-world coordinates of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
radius = (ca[4])/2 + 0.51 * spacing[0] # increasing by circa half the spacing in the z direction (trying to capture a wider region/border for learning and to address the rough net)
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (constrained to a uniform RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
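# Minimal usage sketch, assuming the standard LUNA16 annotations.csv layout
# (seriesuid, coordX, coordY, coordZ, diameter_mm); the helper name is illustrative.
def example_nodule_mask(mhd_file, series_uid):
    lung_img, origin, spacing = load_itk(mhd_file)
    annotations = pd.read_csv(LUNA_ANNOTATIONS)
    cands = annotations[annotations['seriesuid'] == series_uid]
    return draw_circles(lung_img, cands, origin, spacing)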
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saves them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
"""This module is meant to contain the Solscan class"""
from messari.dataloader import DataLoader
from messari.utils import validate_input
from string import Template
from typing import Union, List, Dict
from .helpers import unpack_dataframe_of_dicts
import pandas as pd
#### Block
BLOCK_LAST_URL = 'https://public-api.solscan.io/block/last'
BLOCK_TRANSACTIONS_URL = 'https://public-api.solscan.io/block/transactions'
BLOCK_BLOCK_URL = Template('https://public-api.solscan.io/block/$block')
#### Transaction
TRANSACTION_LAST_URL = 'https://public-api.solscan.io/transaction/last'
TRANSACTION_SIGNATURE_URL = Template('https://public-api.solscan.io/transaction/$signature')
#### Account
ACCOUNT_TOKENS_URL = 'https://public-api.solscan.io/account/tokens'
ACCOUNT_TRANSACTIONS_URL = 'https://public-api.solscan.io/account/transactions'
ACCOUNT_STAKE_URL = 'https://public-api.solscan.io/account/stakeAccounts'
ACCOUNT_SPL_TXNS_URL = 'https://public-api.solscan.io/account/splTransfers'
ACCOUNT_SOL_TXNS_URL = 'https://public-api.solscan.io/account/solTransfers'
ACCOUNT_EXPORT_TXNS_URL = 'https://public-api.solscan.io/account/exportTransactions'
ACCOUNT_ACCOUNT_URL = Template('https://public-api.solscan.io/account/$account')
#### Token
TOKEN_HOLDERS_URL = 'https://public-api.solscan.io/token/holders'
TOKEN_META_URL = 'https://public-api.solscan.io/token/meta'
TOKEN_LIST_URL = 'https://public-api.solscan.io/token/list'
#### Market
MARKET_INFO_URL = Template('https://public-api.solscan.io/market/token/$tokenAddress')
#### Chain Information
CHAIN_INFO_URL = 'https://public-api.solscan.io/chaininfo'
# TODO: make this clean / not hardcoded? look into how this works
HEADERS={'accept': 'application/json', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'} # pylint: disable=line-too-long
class Solscan(DataLoader):
"""This class is a wrapper around the Solscan API
"""
def __init__(self):
DataLoader.__init__(self, api_dict=None, taxonomy_dict=None)
#################
# Block endpoints
def get_last_blocks(self, num_blocks=1) -> pd.DataFrame:
"""returns info for last blocks (default is 1, limit is 20)
Parameters
----------
num_blocks: int (default is 1)
number of blocks to return, max is 20
Returns
-------
DataFrame
DataFrame with block information
"""
# Max value is 20 or API bricks
limit=num_blocks if num_blocks < 21 else 20
params = {'limit': limit}
last_blocks = self.get_response(BLOCK_LAST_URL,
params=params,
headers=HEADERS)
last_blocks_df = pd.DataFrame(last_blocks)
last_blocks_df.set_index('currentSlot', inplace=True)
last_blocks_df = unpack_dataframe_of_dicts(last_blocks_df)
# TODO, extract data from 'result'
return last_blocks_df
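# Minimal usage sketch (the public endpoints need no API key):
#   solscan = Solscan()
#   last_blocks_df = solscan.get_last_blocks(num_blocks=5)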
def get_block_last_transactions(self, blocks_in: Union[str, List],
offset=0, num_transactions=10) -> pd.DataFrame:
"""get last num_transactions of given block numbers
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
num_transactions: int (default is 10)
number of transactions to return
Returns
-------
DataFrame
dataframe with transaction details
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
params = {'block': block,
'offset': offset,
'limit': num_transactions}
txns = self.get_response(BLOCK_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
txns_df = pd.DataFrame(txns)
df_list.append(txns_df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_block(self, blocks_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given block(s)
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
Returns
-------
DataFrame
DataFrame with block information
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
endpoint_url = BLOCK_BLOCK_URL.substitute(block=block)
response = self.get_response(endpoint_url,
headers=HEADERS)
df = pd.DataFrame(response)
df.drop('currentSlot', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = fin_df.xs('result', axis=1, level=1)
return fin_df
#######################
# Transaction endpoints
def get_last_transactions(self, num_transactions=10) -> pd.DataFrame:
"""Return last num_transactions transactions
Parameters
----------
num_transactions: int (default is 10)
number of transactions to return, limit is 20
Returns
-------
DataFrame
dataframe with transaction details
"""
# 20
limit=num_transactions if num_transactions < 21 else 20
params = {'limit': limit}
response = self.get_response(TRANSACTION_LAST_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
fin_df = unpack_dataframe_of_dicts(df)
return fin_df
def get_transaction(self, signatures_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given transaction signature(s)
Parameters
----------
signatures_in: str, List
single signature in or list of signatures in
Returns
-------
DataFrame
DataFrame with transaction details
"""
signatures = validate_input(signatures_in)
series_list = []
for signature in signatures:
endpoint_url = TRANSACTION_SIGNATURE_URL.substitute(signature=signature)
response = self.get_response(endpoint_url,
headers=HEADERS)
#print(response)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=signatures, axis=1)
return fin_df
###################
# Account endpoints
def get_account_tokens(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return token balances of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with token balances of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TOKENS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_transactions(self, accounts_in: Union[str,List]) -> pd.DataFrame:
"""Return DataFrame of transactions of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with transactions of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_stake(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Get staking accounts of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with staking accounts of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_STAKE_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
        return fin_df
# TODO(*): Move to ib/medata and rename contract_metadata.py
import logging
import os
from typing import List
import ib_insync
import pandas as pd
import helpers.io_ as hio
import im.ib.data.extract.gateway.utils as videgu
_LOG = logging.getLogger(__name__)
class IbMetadata:
def __init__(self, file_name: str) -> None:
self.file_name = file_name
def load(self) -> pd.DataFrame:
"""
Load the data generated through update.
The df looks like:
conId,secType,symbol,lastTradeDateOrContractMonth,strike,right,multiplier,
exchange,primaryExchange,currency,localSymbol,tradingClass,
includeExpired,secIdType,secId,comboLegsDescrip,comboLegs,
deltaNeutralContract
81596321,FUT,NG,20190327,0.0,,10000,NYMEX,,USD,NGJ9,NG,False,,,,[],
81596321,FUT,NG,20190327,0.0,,10000,QBALGO,,USD,NGJ9,NG,False,,,,[],
81596324,FUT,NG,20190426,0.0,,10000,NYMEX,,USD,NGK9,NG,False,,,,[],
"""
if os.path.exists(self.file_name):
df = pd.read_csv(self.file_name, index_col=0)
df = self._clean(df)
else:
_LOG.debug("No file '%s'", self.file_name)
df = pd.DataFrame()
return df
def update(
self,
ib: ib_insync.ib.IB,
contracts: List[ib_insync.Contract],
append: bool = False,
) -> None:
"""
Update metadata in `file_name` for the given contracts.
:param append: if True it keeps appending
"""
dfs = []
for contract in contracts:
df_tmp = videgu.get_contract_details(ib, contract)
dfs.append(df_tmp)
df = pd.concat(dfs, axis=0)
import pandas as pd
import numpy as np
import unittest
from dstools.preprocessing.Bucketizer import Bucketizer
class TestBucketizer(unittest.TestCase):
def compare_DataFrame(self, df_transformed, df_transformed_correct):
"""
helper function to compare the values of the transformed DataFrame with the values of a correctly transformed DataFrame
"""
#same number of columns
self.assertEqual(len(df_transformed.columns), len(df_transformed_correct.columns))
#check for every column in correct DataFrame, that all items are equal
for column in df_transformed_correct.columns:
#compare every element
for x, y in zip(df_transformed[column], df_transformed_correct[column]):
#if both values are np.NaN, the assertion fails, although they are equal
#if np.isnan(x)==True and np.isnan(y)==True: --> doesn't work with strings
if pd.isnull(x)==True and pd.isnull(y)==True:
pass
else:
self.assertEqual(x, y)
def test_no_numeric_feature(self):
"""
no transformation should be performed
"""
df=pd.DataFrame({'x':[np.NaN,'b','c']})
df_transformed_correct=pd.DataFrame({'x':[np.NaN,'b','c']})
bucket = Bucketizer(features=['x'])
df_transformed = bucket.fit_transform(df)
self.compare_DataFrame(df_transformed, df_transformed_correct)
def test_one_numeric_feature_no_transformation(self):
"""
no transformation should be performed
"""
df = pd.DataFrame({'x':[1,2,3]})
import numpy as np
import pandas as pd
from scipy import interpolate
import pickle # to serialise objects
from scipy import stats
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import train_test_split
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
RANDOM_SEED = 42
dataset_train = pd.read_csv('final_training_set_8people.csv')
training_set = pd.DataFrame(dataset_train.iloc[:,:].values)
training_set.columns = ["User","Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
X = training_set.iloc[:, 3]
X = X.astype(float)
X = (X*1000000).astype('int64')
Y = training_set.iloc[:, 4]
Y = Y.astype(float)
Y = (Y*1000000).astype('int64')
Z = training_set.iloc[:, 5]
Z = Z.astype(float)
Z = (Z*1000000).astype('int64')
Old_T = (training_set.iloc[:, 2]).astype(float)
Old_T = (Old_T * 1000000)
Old_T = Old_T.astype('int64')
New_T = np.arange(0, 12509996000, 50000)
New_T = New_T.astype('int64')
# find interpolation function
interpolate_function = interpolate.interp1d(Old_T, X, axis = 0, fill_value="extrapolate")
X_Final = interpolate_function((New_T))
interpolate_function = interpolate.interp1d(Old_T, Y, axis = 0, fill_value="extrapolate")
Y_Final = interpolate_function((New_T))
interpolate_function = interpolate.interp1d(Old_T, Z, axis = 0, fill_value="extrapolate")
Z_Final = interpolate_function((New_T))
#Combining data into one pandas dataframe
Dataset = pd.DataFrame()
Dataset['X_Final'] = X_Final
Dataset['Y_Final'] = Y_Final
Dataset['Z_Final'] = Z_Final
Dataset['New_Timeframe'] = New_T
Dataset = Dataset/1e6
Dataset = Dataset[['New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']]
Dataset['New_Activity'] = ""
#Dataset = Dataset.astype('int64')
Dataset = Dataset[['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']]
#function to fill in new dataset with related activity
Dataset = Dataset.to_numpy()
training_set = training_set.to_numpy()
time = 0
temp = training_set[0][1]
var_to_assign = ""
last_row = 0
new_row = 0
for i in range(len(training_set)-1):
if(training_set[i][1] == temp):
continue
if (training_set[i][1] != temp):
var_to_assign = temp
temp = training_set[i][1]
time = training_set[i][2]
a1 = [x for x in Dataset[:, 1] if x <= time]
new_row = len(a1)
Dataset[last_row:new_row+1, 0] = var_to_assign
last_row = new_row
continue
#converting both arrays back to Dataframes
Dataset = pd.DataFrame(Dataset)
Dataset.columns = ['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']
training_set = pd.DataFrame(training_set)
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 16:14:45 2021
@author: bdobson
"""
import os
import pandas as pd
import geopandas as gpd
from matplotlib import pyplot as plt
root = os.path.join("C:\\", "Users", "bdobson", "Documents", "GitHub", "cwsd_sewer","data")
catchment = "cranbrook"
cluster = 'cluster_Louv_266'
rain = "august"
dt = "sim_dt_30_s"
cluster_root = os.path.join(root,catchment,"results","2021-03-02",cluster,dt)
results_root = os.path.join(cluster_root, rain)
plots_root = os.path.join(root, catchment, "results", "2021-03-02", "plots")
info_fid = os.path.join(results_root, "highfid_flows.gzip")
flow_fid = os.path.join(results_root, "flows.gzip")
dep_fid = os.path.join(results_root, "depths.gzip")
info_df = pd.read_parquet(info_fid).set_index('time')
info_df.index = pd.to_datetime(info_df.index)
flow_df = pd.read_parquet(flow_fid)
flow_df.index = pd.to_datetime(flow_df.index)
edges_gdf = gpd.read_file(os.path.join(cluster_root, "compartment_edges.geojson"))
nodes_gdf = gpd.read_file(os.path.join(cluster_root, "compartment_nodes.geojson"))
info_fid = os.path.join(results_root, "highfid_nodes.gzip")
node_fid = os.path.join(results_root, "storages.gzip")
infon_df = pd.read_parquet(info_fid).set_index('time')
infon_df.index = pd.to_datetime(infon_df.index)
# infon_df['volume'] = infon_df['volume'] + infon_df['floodvol']
node_df = pd.read_parquet(node_fid)
import pandas as pd
import argparse
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
import math
def get_args():
desc = 'Given sj files, see which splice junctions are shared/unique between datasets'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-sj_1', dest='sj_1',
help = '1st splice junction file')
parser.add_argument('-sj_1_name', dest='sj_1_name',
help = '1st splice junction file sample name ie "Gencode"')
parser.add_argument('-sj_2', dest='sj_2',
help = '2nd splice junction file')
parser.add_argument('-sj_2_name', dest='sj_2_name',
help = '2nd splice junction file sample name ie "Gencode"')
parser.add_argument('-sj_3', dest='sj_3',
help = '3rd splice junction file')
parser.add_argument('-sj_3_name', dest='sj_3_name',
help = '3rd splice junction file sample name ie "Gencode"')
parser.add_argument('-sample', dest='sample_name',
help = 'Sample name ie "PacBio GM12878"')
parser.add_argument('--log', dest='log_sizes', default=False,
action='store_true', help = 'Log the sizes of the circles')
args = parser.parse_args()
return args
def read_sj_file(infile, dtype):
df = pd.read_csv(infile, sep='\t',
names=['chrom', 'start', 'stop', 'strand'], usecols=[0,1,2,3])
# df.drop_duplicates(inplace=True)
return df
def find_intersect_counts(dfa, dfb, dfc, args):
# intersection of all (a,b,c)
temp = pd.merge(dfa, dfb, how='inner', on=['chrom', 'start', 'stop', 'strand'])
temp = pd.merge(temp, dfc, how='inner', on=['chrom', 'start', 'stop', 'strand'])
count_abc = len(temp.index)
# intersection of (a,b)
temp = pd.merge(dfa, dfb, how='inner', on=['chrom', 'start', 'stop', 'strand'])
count_ab = len(temp.index) - count_abc
# intersection of (a,c)
temp = pd.merge(dfa, dfc, how='inner', on=['chrom', 'start', 'stop', 'strand'])
#!/usr/bin/env python3
#libraries
import pandas as pd
import numpy as np
import re
import os
pd.set_option('display.max_rows',200)
pd.set_option('display.max_columns',200)
import matplotlib.pyplot as plt
import seaborn as sns
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import timedelta
import warnings
warnings.filterwarnings("ignore")
def read_sql_table(table_name):
db = pymysql.connect(host='localhost', user="###",passwd="####" )
cur = db.cursor()
sql="SELECT * FROM {} ".format(table_name)
in_data = pd.read_sql(sql,db)
return in_data
uspa_subset = read_sql_table("recoms.uspa_subset")
uspa_subset['mobile'] = uspa_subset['mobile'].astype(str)
billing_data = uspa_subset[['first_name','mobile','bill_date','Bill_time','bill_date_time','bill_amount','bill_discount','total_quantity']]
billing_data['key'] = billing_data['mobile'] + ' ' + billing_data['bill_date'].astype(str)
billing_data.drop_duplicates('key',inplace=True)
billing_avg = billing_data.groupby(['mobile']).agg({'bill_amount':'mean','bill_discount':'mean',
'total_quantity':'mean','key':'count',
'bill_date':'max'}).reset_index()
billing_avg_temp = billing_data.groupby(['mobile']).agg({'bill_date':'min'}).reset_index()
billing_avg.rename(columns = {'bill_amount':'average_bill_amount','bill_discount':'average_bill_discount',
'total_quantity':'quantities_per_visit','key':'visit_count','bill_date':'last_visit_date'},inplace=True)
def visit_freq_bins(visit_counts,frequency=45):
visit_count_bin = visit_counts//frequency
if (visit_counts > 359):
formatted_bin = '> 1 year'
else:
formatted_bin = str(visit_count_bin * frequency) + "-" + str((visit_count_bin +1) * frequency)
return formatted_bin
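# Worked examples for the 45-day binning above:
#   visit_freq_bins(100) -> 100 // 45 = 2 -> "90-135"
#   visit_freq_bins(400) -> "> 1 year" (any average above 359 days)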
billing_avg_temp.rename(columns={'bill_date':'first_visit_date'},inplace=True)
billing_avg = pd.merge(billing_avg,billing_avg_temp,left_on = 'mobile',right_on = 'mobile',how='left')
billing_avg['last_visit_date'] = pd.to_datetime(billing_avg['last_visit_date'])
billing_avg['first_visit_date'] = pd.to_datetime(billing_avg['first_visit_date'])
billing_avg['total_days'] = (billing_avg['last_visit_date'] - billing_avg['first_visit_date']).dt.days
billing_avg['average_visit_days'] = billing_avg['total_days'] / billing_avg['visit_count']
billing_avg['average_visit_days_round'] = billing_avg['average_visit_days'].apply(np.ceil).astype(int)
temp = billing_avg['average_visit_days'].apply(lambda x:pd.Timedelta(x,unit='D'))
billing_avg['proj_next_visit'] = billing_avg['last_visit_date'] + temp
billing_avg['proj_next_visit'] = billing_avg['proj_next_visit'].dt.date
billing_avg['visit_count_bin'] = billing_avg['average_visit_days_round'].apply(visit_freq_bins)
uspa_color = uspa_subset[['mobile','variantdescription']]
uspa_color['key'] = uspa_color['mobile'] + ' ' + uspa_color['variantdescription']
uspa_color['key'] = uspa_color['key'].apply(lambda x:str(x).lower())
uspa_preferred_color = uspa_color['key'].value_counts().to_frame().reset_index().rename(columns={'index':'mob_col','key':'count'})
uspa_preferred_color = uspa_preferred_color[uspa_preferred_color['mob_col'] != 'nan']
uspa_preferred_color['mobile'] = uspa_preferred_color['mob_col'].apply(lambda x:x.split(' ')[0])
uspa_preferred_color['color'] = uspa_preferred_color['mob_col'].apply(lambda x:x.split(' ')[1])
uspa_preferred_color_temp = uspa_preferred_color.groupby(['mobile']).agg({'count':'max'}).reset_index()
uspa_preferred_color_temp['key_1'] = uspa_preferred_color_temp['mobile'].astype(str) + ' ' + uspa_preferred_color_temp['count'].astype(str)
uspa_preferred_color['key_1'] = uspa_preferred_color['mobile'].astype(str) + ' ' + uspa_preferred_color['count'].astype(str)
uspa_color_final_1 = pd.merge(uspa_preferred_color,uspa_preferred_color_temp, left_on='key_1',right_on ='key_1',how='left')
uspa_color_final_1 = uspa_color_final_1.drop_duplicates(['mobile_x'])
uspa_color_dataframe = uspa_color_final_1[['mobile_x','color']]
billing_avg = pd.merge(billing_avg,uspa_color_dataframe, left_on = 'mobile',right_on='mobile_x',how = 'left')
billing_avg.drop(['mobile_x'],axis=1,inplace=True)
uspa_prod_type = uspa_subset[['mobile','materialtypedescription']]
uspa_prod_type['key'] = uspa_prod_type['mobile'] + ' ' + uspa_prod_type['materialtypedescription']
uspa_prod_type['key'] = uspa_prod_type['key'].apply(lambda x:str(x).lower())
uspa_preferred_prod = uspa_prod_type['key'].value_counts().to_frame().reset_index().rename(columns={'index':'mob_prod','key':'count'})
uspa_preferred_prod = uspa_preferred_prod[uspa_preferred_prod['mob_prod'] != 'nan']
uspa_preferred_prod['mobile'] = uspa_preferred_prod['mob_prod'].apply(lambda x:x.split(' ')[0])
uspa_preferred_prod['prod'] = uspa_preferred_prod['mob_prod'].apply(lambda x:x.split(' ')[1])
uspa_preferred_prod_temp = uspa_preferred_prod.groupby(['mobile']).agg({'count':'max'}).reset_index()
uspa_preferred_prod_temp['key_1'] = uspa_preferred_prod_temp['mobile'].astype(str) + ' ' + uspa_preferred_prod_temp['count'].astype(str)
uspa_preferred_prod['key_1'] = uspa_preferred_prod['mobile'].astype(str) + ' ' + uspa_preferred_prod['count'].astype(str)
uspa_prod_final_1 = pd.merge(uspa_preferred_prod,uspa_preferred_prod_temp, left_on='key_1',right_on ='key_1',how='left')
uspa_prod_final_1 = uspa_prod_final_1.drop_duplicates(['mobile_x'])
uspa_prod_dataframe = uspa_prod_final_1[['mobile_x','prod']]
billing_avg = pd.merge(billing_avg,uspa_prod_dataframe, left_on = 'mobile',right_on='mobile_x',how = 'left')
billing_avg.drop(['mobile_x'],axis=1,inplace=True)
uspa_prod_type = uspa_subset[['mobile','gender']]
uspa_prod_type['key'] = uspa_prod_type['mobile'] + ' ' + uspa_prod_type['gender']
uspa_prod_type['key'] = uspa_prod_type['key'].apply(lambda x:str(x).lower())
uspa_preferred_prod = uspa_prod_type['key'].value_counts().to_frame().reset_index().rename(columns={'index':'mob_prod','key':'count'})
uspa_preferred_prod = uspa_preferred_prod[uspa_preferred_prod['mob_prod'] != 'nan']
uspa_preferred_prod['mobile'] = uspa_preferred_prod['mob_prod'].apply(lambda x:x.split(' ')[0])
uspa_preferred_prod['gender'] = uspa_preferred_prod['mob_prod'].apply(lambda x:x.split(' ')[1])
uspa_preferred_prod_temp = uspa_preferred_prod.groupby(['mobile']).agg({'count':'max'}).reset_index()
uspa_preferred_prod_temp['key_1'] = uspa_preferred_prod_temp['mobile'].astype(str) + ' ' + uspa_preferred_prod_temp['count'].astype(str)
uspa_preferred_prod['key_1'] = uspa_preferred_prod['mobile'].astype(str) + ' ' + uspa_preferred_prod['count'].astype(str)
uspa_prod_final_1 = pd.merge(uspa_preferred_prod,uspa_preferred_prod_temp, left_on='key_1',right_on ='key_1',how='left')
uspa_prod_final_1 = uspa_prod_final_1.drop_duplicates(['mobile_x'])
uspa_prod_dataframe = uspa_prod_final_1[['mobile_x','gender']]
billing_avg = pd.merge(billing_avg, uspa_prod_dataframe, left_on='mobile', right_on='mobile_x', how='left')
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne.transforms import apply_trans, _get_trans
from mne.utils import _validate_type, _check_fname
from mne.io import BaseRaw
def _read_fold_xls(fname, atlas="Juelich"):
"""Read fOLD toolbox xls file.
The values are then manipulated into a tidy dataframe.
Note the xls files are not included as no license is provided.
Parameters
----------
fname : str
Path to xls file.
atlas : str
Requested atlas.
"""
page_reference = {"AAL2": 2,
"AICHA": 5,
"Brodmann": 8,
"Juelich": 11,
"Loni": 14}
tbl = pd.read_excel(fname,
sheet_name=page_reference[atlas])
# Remove the spacing between rows
empty_rows = np.where(np.isnan(tbl["Specificity"]))[0]
tbl = tbl.drop(empty_rows).reset_index(drop=True)
# Empty values in the table mean its the same as above
for row_idx in range(1, tbl.shape[0]):
for col_idx, col in enumerate(tbl.columns):
if not isinstance(tbl[col][row_idx], str):
if np.isnan(tbl[col][row_idx]):
tbl.iloc[row_idx, col_idx] = \
tbl.iloc[row_idx - 1, col_idx]
tbl["Specificity"] = tbl["Specificity"] * 100
tbl["brainSens"] = tbl["brainSens"] * 100
return tbl
def _generate_montage_locations(montage='standard_1005'):
"""Get standard montage locations in dataframe.
Data is returned in the same format as the eeg_positions library.
Parameters
----------
montage : str
Standard MNE montage to use.
"""
montage = mne.channels.make_standard_montage(montage)
coords = pd.DataFrame.from_dict(
montage.get_positions()['ch_pos']).T
coords["label"] = coords.index
coords = coords.rename(columns={0: "x", 1: "y", 2: "z"})
return coords.reset_index(drop=True)
def _find_closest_standard_location(position, reference, trans_pos='mri'):
"""Return closest montage label to coordinates.
Parameters
----------
position : array
Coordinates.
reference : dataframe
As generated by _generate_montage_locations.
trans_pos : str
Apply a transformation to positions to specified frame.
Use None for no transformation.
"""
p0 = np.array(position)
if trans_pos is not None:
head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')
p0 = apply_trans(head_mri_t, p0)
dists = np.zeros(reference["x"].shape)
for idx in range(len(dists)):
p1 = np.array([reference["x"][idx],
reference["y"][idx],
reference["z"][idx]])
dists[idx] = np.linalg.norm(p0 - p1)
min_idx = np.argmin(dists)
return reference["label"][min_idx]
def fold_landmark_specificity(raw, landmark, fold_files=None,
atlas="Juelich"):
"""Return the specificity of each channel to a specified brain landmark.
Parameters
----------
raw : BaseRaw
The fNIRS data.
landmark : str
Landmark of interest. Must be present in fOLD toolbox data file.
fold_files : list | path-like | None
If None, will use the MNE_NIRS_FOLD_PATH config variable.
If path-like, should be a path to a directory containing '10-10.xls'
and '10-5.xls'. If list, should be paths to the fold toolbox files.
See the Notes section of :func:`~mne_nirs.io.fold_channel_specificity`
for details.
atlas : str
Brain atlas to use.
Returns
-------
spec : array
Specificity values for each channel to brain landmark.
See Also
--------
fold_landmark_specificity
Notes
-----
Specificity values are provided by the fOLD toolbox
:footcite:`morais2018fnirs` excel files. See the Notes section of
:func:`~mne_nirs.io.fold_channel_specificity` for more details.
References
----------
.. footbibliography::
"""
_validate_type(landmark, str, 'landmark')
_validate_type(raw, BaseRaw, 'raw')
reference_locations = _generate_montage_locations()
fold_tbl = _check_load_fold(fold_files, atlas)
specificity = np.zeros(len(raw.ch_names))
for cidx in range(len(raw.ch_names)):
tbl = _source_detector_fold_table(raw, cidx,
reference_locations, fold_tbl)
if len(tbl) > 0:
tbl["ContainsLmk"] = [landmark in la for la in tbl["Landmark"]]
tbl = tbl.query("ContainsLmk == True")["Specificity"]
if len(tbl) == 0:
continue
# print(f"No data for {src_name}-{det_name}")
elif len(tbl) == 1:
specificity[cidx] = tbl.values[0]
else:
raise RuntimeError("Multiple specificity values returned")
return np.array(specificity)
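# Minimal usage sketch; 'raw' is assumed to be an fNIRS recording positioned on fsaverage,
# and the landmark string must match an entry in the fOLD tables for the chosen atlas:
#   spec = fold_landmark_specificity(raw, "L Precentral Gyrus", atlas="Juelich")
#   # spec: one value per channel, in percent (0-100)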
def fold_channel_specificity(raw, fold_files=None, atlas="Juelich"):
"""Return the landmarks and specificity a channel is sensitive to.
Parameters
----------
raw : BaseRaw
The fNIRS data.
fold_files : list | path-like | None
If None, will use the MNE_NIRS_FOLD_PATH config variable.
If path-like, should be a path to a directory containing '10-10.xls'
and '10-5.xls'. If list, should be paths to the fold toolbox files.
See Notes for details.
atlas : str
Brain atlas to use.
Returns
-------
spec : list of dataframes
List of dataframes, one for each channel.
See Also
--------
fold_landmark_specificity
Notes
-----
Specificity values are provided by the fOLD toolbox
:footcite:`morais2018fnirs` excel files.
For licensing reasons, these files are not distributed with MNE-NIRS.
You need to download them from
`the author's website <https://github.com/nirx/fOLD-public>`__.
To automatically utilize the ``MNE_NIRS_FOLD_PATH`` config for the
``fold_files`` parameter, you can download the entire ``fOLD-public``
repository `as a zip <https://github.com/nirx/fOLD-public/archive/refs/heads/master.zip>`__
and expand it to some suitable location like
``~/mne_data/fOLD/fOLD-public-master``, and then set the config value
on your machine by using :func:`mne:mne.set_config` like::
>>> mne.set_config('MNE_NIRS_FOLD_PATH', '~/mne_data/fOLD/fOLD-public-master/Supplementary')
From then on, :func:`~mne_nirs.io.fold_channel_specificity` and
:func:`~mne_nirs.io.fold_landmark_specificity` will automatically use this
directory to find the fOLD xls files when you pass ``fold_files=None``
(which is the default). We recommend following this procedure so that
the files can be reused automatically.
References
----------
.. footbibliography::
""" # noqa: E501
_validate_type(raw, BaseRaw, 'raw')
reference_locations = _generate_montage_locations()
fold_tbl = _check_load_fold(fold_files, atlas)
chan_spec = list()
for cidx in range(len(raw.ch_names)):
tbl = _source_detector_fold_table(raw, cidx,
reference_locations, fold_tbl)
chan_spec.append(tbl.reset_index(drop=True))
return chan_spec
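# Usage sketch (illustrative, not part of the documented API): each returned
# DataFrame lists the landmarks a source-detector pair is sensitive to together
# with the corresponding specificity values.
#     chan_tables = fold_channel_specificity(raw)
#     print(chan_tables[0])  # landmark/specificity table for the first channel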
def _check_load_fold(fold_files, atlas):
_validate_type(fold_files, (list, 'path-like', None), 'fold_files')
if fold_files is None:
fold_files = mne.get_config('MNE_NIRS_FOLD_PATH')
if fold_files is None:
raise ValueError(
'MNE_NIRS_FOLD_PATH not set, either set it using '
'mne.set_config or pass fold_files as str or list')
if not isinstance(fold_files, list): # path-like
fold_files = _check_fname(
fold_files, overwrite='read', must_exist=True, name='fold_files',
need_dir=True)
fold_files = [op.join(fold_files, f'10-{x}.xls') for x in (5, 10)]
fold_tbl = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""dlw9383-bandofthehawk-output.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/154b5GvPxORu_mhpHDIsNlWvyBxMIwEw2
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(15,10)})
pd.set_option("display.precision", 10)
import os
train=pd.read_csv("/content/PricingData.csv")
train.shape
train.isnull().sum()
train.info()
train = pd.concat([train, train['Seat Fare Type 1'].str.split(',', expand=True)], axis=1)
train.columns = train.columns.map(str)
train = train.rename({'0': 'berth1_1','1': 'berth2_1', '2': 'berth3_1','3': 'berth4_1', '4': 'berth5_1','5': 'berth6_1', '6': 'berth7_1'},axis='columns')
train.update(train[['berth1_1','berth2_1','berth3_1','berth4_1', 'berth5_1', 'berth6_1','berth7_1']].fillna(0))
train = pd.concat([train, train['Seat Fare Type 2'].str.split(',', expand=True)], axis=1)
train.columns = train.columns.map(str)
train = train.rename({'0': 'berth1_2','1': 'berth2_2', '2': 'berth3_2','3': 'berth4_2'},axis='columns')
train.update(train[['berth1_2','berth2_2','berth3_2','berth4_2']].fillna(0))
train.drop(['Seat Fare Type 1', 'Seat Fare Type 2','Service Date','RecordedAt'], axis=1, inplace=True)
train[train.columns[1:]] = train[train.columns[1:]].astype(float)
train.drop_duplicates(keep='first', inplace=True)
id = train["Bus"]
train.drop(['Bus'], axis=1, inplace=True)
reduced_data = train.values
from sklearn import preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
reduced_data = min_max_scaler.fit_transform(reduced_data)
from sklearn.cluster import KMeans
data_frame = reduced_data
Sum_of_squared_distances = []
K = range(1,10)
for num_clusters in K :
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(data_frame)
Sum_of_squared_distances.append(kmeans.inertia_)
plt.plot(K,Sum_of_squared_distances,'bx-')
plt.xlabel('Values of K')
plt.ylabel('Sum of squared distances/Inertia')
plt.title('Elbow Method For Optimal k')
plt.show()
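# The bend ("elbow") of the inertia curve indicates a reasonable number of clusters;
# k=4 is used below for the final KMeans model.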
from sklearn.decomposition import PCA
reduced_data = PCA(n_components=2).fit_transform(train)
results = pd.DataFrame(reduced_data,columns=['pca1','pca2'])
kmeansmodel = KMeans(n_clusters=4, init='k-means++', random_state=0)
y_kmeans= kmeansmodel.fit_predict(reduced_data)
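# Illustrative follow-up (sketch): attach the cluster labels to the PCA coordinates to plot them
#     results['cluster'] = y_kmeans
#     sns.scatterplot(data=results, x='pca1', y='pca2', hue='cluster')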
y_k= | pd.DataFrame(y_kmeans, columns=['Clusters']) | pandas.DataFrame |
import pandas as pd
import plotly.express as px
# Step 1 - Import the database into Python
tabela = | pd.read_csv(r"C:\Users\jose_\OneDrive\Documentos\Estudos\arquivos_pyton\telecom_users.csv") | pandas.read_csv |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
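# `limit` caps how many NaN entries a single fillna call replaces; the remaining NaNs stay untouched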
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal( | isna(ser) | pandas.isna |
from typing import List
import pandas as pd
# Not covered: Essentially a script over other unit tested functions
def clean_data(df: pd.DataFrame) -> pd.DataFrame: # pragma: no cover
"""Parse
* location information to append city, state, zip code, and neighborhood columns
* salary information to append minimum and maximum annual salary in dollars
:param df:
Dataframe with "location" column
:return:
Dataframe with "location" column dropped but expanded into other,
finer-detailed columns
"""
result = df.drop_duplicates()
result = expand_location(result)
result = expand_salary(result)
return result
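# Example usage (sketch; the file name is illustrative and the column names follow
# the docstrings below):
#     raw_jobs = pd.read_csv("jobs.csv")
#     tidy_jobs = clean_data(raw_jobs)
#     tidy_jobs[["annual_salary_min_$", "annual_salary_max_$"]].describe()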
def expand_salary(df: pd.DataFrame) -> pd.DataFrame:
"""
Replace "salary" column with "min_annual_salary_$" and "max_annual_salary_$" columns
extracted from it.
:param df:
Dataframe with "salary" column
:return:
Dataframe with "salary" column dropped and "min_annual_salary_$" and
"max_annual_salary_$" columns appended
"""
original_cols = df.columns
salary = "salary"
replacement_columns = ["annual_salary_min_$", "annual_salary_max_$"]
columns_to_expanded = {salary: replacement_columns}
# Parse salary information into list
df["parsed_salary"] = df[df[salary].notnull()].apply(
func=lambda row: parse_salary(row[salary]), axis=1
)
# Explode (non-null) parsed list into its own columns
df_expanded_salary = (
df[["link", salary, "parsed_salary"]][df[salary].notnull()]
.apply(
func=lambda row: pd.concat(
[row, pd.Series([e for e in row["parsed_salary"]])], axis=0
),
axis=1,
)
.drop([salary, "parsed_salary"], axis=1)
# Rename the default-named exploded columns
.rename(columns={i: col for i, col in enumerate(replacement_columns)})
)
# Join non-null columns back in on unique identifier
df = | pd.merge(df, df_expanded_salary, how="outer", on="link") | pandas.merge |
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
from datetime import datetime, timedelta
import re
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas._libs.index as _index
import pandas as pd
from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
import pandas._testing as tm
def test_fancy_getitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s["1/2/2009"] == 48
assert s["2009-1-2"] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[Timestamp(datetime(2009, 1, 2))] == 48
with pytest.raises(KeyError, match=r"^'2009-1-3'$"):
s["2009-1-3"]
tm.assert_series_equal(
s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]
)
def test_fancy_setitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s["1/2/2009"] = -2
assert s[48] == -2
s["1/2/2009":"2009-06-05"] = -3
assert (s[48:54] == -3).all()
def test_dti_reset_index_round_trip():
dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype("M8[ns]")
d3 = d2.set_index("index")
tm.assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
df = df.set_index("Date")
assert df.index[0] == stamp
assert df.reset_index()["Date"][0] == stamp
@pytest.mark.slow
def test_slice_locs_indexerror():
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]
s = Series(range(100000), times)
s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
def test_slicing_datetimes():
# GH 7523
# unique
df = DataFrame(
np.arange(4.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
# duplicates
df = DataFrame(
np.arange(5.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_datetime_tz_pytz():
from pytz import timezone as tz
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz("US/Central").localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil():
from dateutil.tz import tzutc
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = (
lambda x: tzutc() if x == "UTC" else gettz(x)
) # handle special case for utc in dateutil
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="America/New_York")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex():
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
# GH#18435 strings get a pass from tzawareness compat
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
lb = "1990-01-01 04:00:00-0500"
rb = "1990-01-01 07:00:00-0500"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# But we do not give datetimes a pass on tzawareness compat
# TODO: do the same with Timestamps and dt64
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
naive = datetime(1990, 1, 1, 4)
with tm.assert_produces_warning(FutureWarning):
# GH#36148 will require tzawareness compat
result = ts[naive]
expected = ts[4]
assert result == expected
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = ts[4]
tm.assert_series_equal(result, ts)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
msg = r"Invalid comparison between dtype=datetime64\[ns, US/Eastern\] and datetime"
with pytest.raises(TypeError, match=msg):
# tznaive vs tzaware comparison is invalid
# see GH#18376, GH#18162
ts[(ts.index >= lb) & (ts.index <= rb)]
lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)
rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_periodindex():
from pandas import period_range
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
def test_datetime_indexing():
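# indexing with a missing timestamp raises KeyError, while assigning to it appends a new entry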
index = date_range("1/1/2000", "1/7/2000")
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp("1/8/2000")
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
"""
test duplicates in time series
"""
@pytest.fixture
def dups():
dates = [
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
return Series(np.random.randn(len(dates)), index=dates)
def test_constructor(dups):
assert isinstance(dups, Series)
assert isinstance(dups.index, DatetimeIndex)
def test_is_unique_monotonic(dups):
assert not dups.index.is_unique
def test_index_unique(dups):
uniques = dups.index.unique()
expected = DatetimeIndex(
[
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
)
assert uniques.dtype == "M8[ns]" # sanity
tm.assert_index_equal(uniques, expected)
assert dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = dups.index.tz_localize("US/Eastern")
dups_local.name = "foo"
result = dups_local.unique()
expected = DatetimeIndex(expected, name="foo")
expected = expected.tz_localize("US/Eastern")
assert result.tz is not None
assert result.name == "foo"
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_duplicate_dates_indexing(dups):
ts = dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
tm.assert_series_equal(result, expected)
else:
tm.assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
tm.assert_series_equal(cp, expected)
key = datetime(2000, 1, 6)
with pytest.raises(KeyError, match=re.escape(repr(key))):
ts[key]
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice():
idx = DatetimeIndex(["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts["1/2/2000":]
expected = ts[1:]
tm.assert_series_equal(result, expected)
result = ts["1/2/2000":"1/3/2000"]
expected = ts[1:4]
tm.assert_series_equal(result, expected)
def test_groupby_average_dup_values(dups):
result = dups.groupby(level=0).mean()
expected = dups.groupby(dups.index).mean()
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import numpy as np
np.random.seed(1337) # for reproducibility
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from dbn import SupervisedDBNRegression
from dbn import SupervisedDBNClassification
import time
import matplotlib.pyplot as plt
def testing(csv_filepath , save_output_image , model_path):
df = pd.read_csv(csv_filepath)
dt = list()
for i in range(len(df)):
dt.append(df['Date'][i]+ " " +df['Time'][i])
df['DateTime'] = dt
df['DateTime'] = pd.to_datetime(df['DateTime'])
df.index = df['DateTime']
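# drop rows that carry no usable price information: zero traded volume and flat bars (Low == High)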
df.drop(df[df['Volume']==0].index , axis=0 ,inplace=True)
idx = df[df['Low']==df['High']].index
df.drop(idx , axis=0 , inplace=True)
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
import pandas
from copy import deepcopy
from palm.base.target_data import TargetData
from palm.discrete_state_trajectory import DiscreteStateTrajectory,\
DiscreteDwellSegment
class BlinkTargetData(TargetData):
"""
A dwell trajectory loaded from a file. The trajectory
should be a series of dark and bright observations and
the duration of each observation.
Expecting csv file with this format:
class,dwell time
dark,1.5
bright,0.3
dark,1.2
bright,0.1
.
.
.
Attributes
----------
trajectory_factory : class
A class that makes Trajectory objects.
segment_factory : class
A class that makes TrajectorySegment objects.
trajectory : Trajectory
Represents a time trace of dark and bright observations.
filename : string
The trajectory data is loaded from this path.
"""
def __init__(self):
super(BlinkTargetData, self).__init__()
self.trajectory_factory = DiscreteStateTrajectory
self.segment_factory = DiscreteDwellSegment
self.trajectory = None
self.filename = None
def __len__(self):
return len(self.trajectory)
def load_data(self, data_file):
"""
Load trajectory from file.
Parameters
----------
data_file : string
Path of file to load.
"""
self.filename = data_file
data_table = | pandas.read_csv(data_file, header=0) | pandas.read_csv |
import unittest
import pandas as pd
import numpy as np
from ..timeseries import TimeSeries
class TimeSeriesTestCase(unittest.TestCase):
times = pd.date_range('20130101', '20130110')
pd_series1 = pd.Series(range(10), index=times)
pd_series2 = pd.Series(range(5, 15), index=times)
pd_series3 = pd.Series(range(15, 25), index=times)
series1: TimeSeries = TimeSeries(pd_series1)
series2: TimeSeries = TimeSeries(pd_series1, pd_series2, pd_series3)
series3: TimeSeries = TimeSeries(pd_series2)
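# series1 wraps pd_series1 alone; series2 additionally carries pd_series2/pd_series3 as
# lower/upper confidence series (see test_creation); series3 wraps pd_series2.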
def test_creation(self):
with self.assertRaises(ValueError):
# Index is dateTimeIndex
TimeSeries(pd.Series(range(10), range(10)))
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_lo = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_hi = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, None, pd_hi)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, None, pd_lo)
with self.assertRaises(ValueError):
# Main series cannot have date holes
range_ = pd.date_range('20130101', '20130104').append(pd.date_range('20130106', '20130110'))
TimeSeries(pd.Series(range(9), index=range_))
series_test = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
def test_alt_creation(self):
with self.assertRaises(ValueError):
# Series must contain at least three entries
index = pd.date_range('20130101', '20130102')
TimeSeries.from_times_and_values(index, self.pd_series1.values[:2])
with self.assertRaises(ValueError):
# all arrays must have the same length
TimeSeries.from_times_and_values(self.pd_series1.index,
self.pd_series1.values[:-1],
self.pd_series2[:-2],
self.pd_series3[:-1])
# test if reordering is correct
rand_perm = np.random.permutation(range(1, 11))
index = pd.to_datetime(['201301{:02d}'.format(i) for i in rand_perm])
series_test = TimeSeries.from_times_and_values(index, self.pd_series1.values[rand_perm-1],
self.pd_series2[rand_perm-1],
self.pd_series3[rand_perm-1].tolist())
self.assertTrue(series_test.start_time() == pd.to_datetime('20130101'))
self.assertTrue(series_test.end_time() == pd.to_datetime('20130110'))
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
self.assertTrue(series_test.freq() == self.series1.freq())
# TODO test over to_dataframe when multiple features choice is decided
def test_eq(self):
seriesA: TimeSeries = TimeSeries(self.pd_series1)
self.assertTrue(self.series1 == seriesA)
# with a defined CI
seriesB: TimeSeries = TimeSeries(self.pd_series1,
confidence_hi=pd.Series(range(10, 20),
index=pd.date_range('20130101', '20130110')))
self.assertFalse(self.series1 == seriesB)
self.assertTrue(self.series1 != seriesB)
# with different dates
seriesC = TimeSeries(pd.Series(range(10), index=pd.date_range('20130102', '20130111')))
self.assertFalse(self.series1 == seriesC)
# compare with both CI
seriesD: TimeSeries = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
seriesE: TimeSeries = TimeSeries(self.pd_series1, self.pd_series3, self.pd_series2)
self.assertTrue(self.series2 == seriesD)
self.assertFalse(self.series2 == seriesE)
def test_dates(self):
self.assertEqual(self.series1.start_time(), pd.Timestamp('20130101'))
self.assertEqual(self.series1.end_time(), pd.Timestamp('20130110'))
self.assertEqual(self.series1.duration(), pd.Timedelta(days=9))
def test_slice(self):
# base case
seriesA = self.series1.slice(pd.Timestamp('20130104'), | pd.Timestamp('20130107') | pandas.Timestamp |
#python imports
import os
import gc
import string
import random
import time
import pickle
import shutil
from datetime import datetime
#internal imports
from modules.Signal import Signal
from modules.Database import Database
from modules.Predictor import Classifier, ComplexBuilder
from modules.utils import calculateDistanceP, chunks, cleanPath, minMaxNorm, extractMeanByBounds, extractMetricByShiftBounds
import joblib
from joblib import Parallel, delayed, dump, load
import pandas as pd
import numpy as np
from collections import OrderedDict
from itertools import combinations
from multiprocessing import Pool, Value
from joblib import wrap_non_picklable_objects
#plotting
import matplotlib.pyplot as plt
import seaborn as sns
#sklearn imports
from sklearn.metrics import classification_report, homogeneity_score, v_measure_score, completeness_score
from sklearn.model_selection import ParameterGrid
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import RadiusNeighborsRegressor, KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale, minmax_scale, robust_scale
from scipy.stats import ttest_ind, f_oneway
#dimensional reduction
import umap
__VERSION__ = "0.4.48"
filePath = os.path.dirname(os.path.realpath(__file__))
pathToTmp = os.path.join(filePath,"tmp")
alignModels = { "LinearRegression": LinearRegression,
"RadiusNeighborsRegressor" : RadiusNeighborsRegressor,
"KNeighborsRegressor":KNeighborsRegressor
}
alignModelsParams = {
"LinearRegression": {},
"RadiusNeighborsRegressor" : {"weights":"distance","radius":30} ,
"KNeighborsRegressor":{"weights":"distance","n_neighbors":10}
}
STACKING_CLASSIFIER_GRID = {
'rf__max_depth': [70,None,30],#30,,
#'rf__max_features': ['auto'],
# 'rf__min_samples_leaf': [2, 3, 5],
'rf__min_samples_split': [2,4],#[2, 3, 4],
#'rf__n_estimators': [200],
"SVM__C" : [1, 10,1000],
"SVM__kernel": ['rbf','poly'],
'SVM__gamma': [0.01,10,100]
}
OPTICS_PARAM_GRID = {
"min_samples":[2,3,5,8],
"max_eps": [np.inf,2,1,0.9,0.8],
"xi": np.linspace(0,0.3,num=30),
"cluster_method" : ["xi"]
}
AGGLO_PARAM_GRID = {
"n_clusters":[None,115,110,105,100,90,95],
"distance_threshold":[None,0.5,0.4,0.2,0.1,0.05,0.01],
"linkage":["complete","single","average"]
}
AFF_PRO_PARAM = {"damping":np.linspace(0.5,1,num=50)}
HDBSCAN_PROPS = {
"min_cluster_size":[2,3,4,6],
"min_samples":[2,3,4,5]
}
#{"min_cluster_size":[2,3,4,6],"min_samples":[2,3,4,5,8,10]}
CLUSTER_PARAMS = {
"OPTICS":OPTICS_PARAM_GRID,
"AGGLOMERATIVE_CLUSTERING":AGGLO_PARAM_GRID,
"AFFINITY_PROPAGATION":AFF_PRO_PARAM,
"HDBSCAN":HDBSCAN_PROPS
}
svm_param_grid = {
'C': [1, 10, 100, 1000],
'kernel': ['linear','rbf','poly'],
'gamma': [0.01,0.1,1,2,3,4,5]
}
RF_GRID_SEARCH = {
'max_depth': [70,None,30,50,10],#30,,,50,5
'max_features': ['auto'],
'min_samples_leaf': [2,5,3,15], # 5, 15
'min_samples_split': [2 ,3,10],
'n_estimators': [300, 500, 600]
}
entriesInChunks = dict()
class ComplexFinder(object):
def __init__(self,
addImpurity = 0.0,
alignMethod = "RadiusNeighborsRegressor",#"RadiusNeighborsRegressor",#"KNeighborsRegressor",#"LinearRegression", # RadiusNeighborsRegressor
alignRuns = False,
alignWindow = 3,
allowSingleFractionQuant = False,
analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
analysisName = None,
binaryDatabase = False,
classifierClass = "random_forest",
classifierTestSize = 0.25,
classiferGridSearch = RF_GRID_SEARCH,#STACKING_CLASSIFIER_GRID,#
compTabFormat = False,
considerOnlyInteractionsPresentInAllRuns = 2,
correlationWindowSize = 5,
databaseFilter = {'Organism': ["Human"]},#{'Organism': ["Human"]},#{"Confidence" : [1,2,3,4]} - for hu.map2.0,# {} for HUMAN_COMPLEX_PORTAL
databaseIDColumn = "subunits(UniProt IDs)",
databaseFileName = "20190823_CORUM.txt",#"humap2.txt
databaseHasComplexAnnotations = True,
databaseEntrySplitString = ";",
decoySizeFactor = 1.2,
grouping = {"WT": ["D3_WT_03.txt"]},
hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
indexIsID = False,
idColumn = "Uniprot ID",
interactionProbabCutoff = 0.7,
justFitAndMatchPeaks = False,
keepOnlySignalsValidInAllConditions = False,
kFold = 3,
maxPeaksPerSignal = 15,
maxPeakCenterDifference = 1.8,
metrices = ["apex","pearson","euclidean","cosine","max_location","rollingCorrelation"], #"umap-dist"
metricesForPrediction = None,#["pearson","euclidean","apex"],
metricQuantileCutoff = 0.001,
minDistanceBetweenTwoPeaks = 3,
minimumPPsPerFeature = 6,
minPeakHeightOfMax = 0.05,
n_jobs = 12,
noDatabaseForPredictions = False,
normValueDict = {},
noDistanceCalculationAndPrediction = False,
peakModel = "LorentzianModel",#"GaussianModel",#"SkewedGaussianModel",#"LorentzianModel",
plotSignalProfiles = False,
plotComplexProfiles = False,
precision = 0.5,
r2Thresh = 0.85,
removeSingleDataPointPeaks = True,
restartAnalysis = False,
retrainClassifier = False,
recalculateDistance = False,
rollingWinType = None,
runName = None,
scaleRawDataBeforeDimensionalReduction = True,
smoothSignal = True,
smoothWindow = 2,
takeRondomSampleFromData =False,
topNCorrFeaturesForUMAPAlignment = 200,
TMTPoolMethod = "sum",
transformQuantDataBy = None,
useRawDataForDimensionalReduction = False,
useFWHMForQuant = True,
umapDefaultKwargs = {"min_dist":0.001,"n_neighbors":5,"n_components":2,"random_state":120},
quantFiles = [],
usePeakCentricFeatures = False
):
"""
Init ComplexFinder Class
Parameters
----------
* alignMethod = "RadiusNeighborsRegressor",
* alignRuns = False,
Alignment of runs is based on signal profiles that were found to have
a single modelled peak. A reference run is assigned by correlation analysis
and chosen based on a maximum R2 value. Then fraction shifts per signal
profile are calculated (must be in the window given by *alignWindow*).
The fraction residuals are then modelled using the method provided in
*alignMethod*. Model peak centers are then adjusted based on the regression results.
Of note, the alignment is performed after peak-modelling and before distance calculations.
* alignWindow = 3,
Number of fractions (+/-) around a single-peak profile that are accepted for the run alignment.
* analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
* analysisName = None,
* binaryDatabase = False,
* classifierClass = "random_forest",
* classifierTestSize = 0.25,
Fraction of the created database containing positive and negative protein-protein
interactions that will be used for testing (for example ROC curve analysis) and classification report.
* classiferGridSearch = RF_GRID_SEARCH.
Dict with keywords matching parameters/settings of estimator (SVM, random forest)
and list of values forming the grid used to find the best estimator settings (evaluated
by k-fold cross validation). Runtime is affected by the number of parameter settings as well as by k-fold.
* compTabFormat = False
True indicates that the data are in the compTab data format, which was recently introduced.
In contrast to standard txt files generated by, for example, MaxQuant, it contains multiple
headers. More information can be found here https://www3.cmbi.umcn.nl/cedar/browse/comptab
ComplexFinder will try to identify the samples and fractions and create separated txt files.
* considerOnlyInteractionsPresentInAllRuns = 2,
Can be either a bool to filter for protein-protein interactions that are present
in all runs. If an integer is provided, the pp interactions are filtered based on
the number of runs in which they were quantified. A value of 4 would indicate that
the pp interaction must have been predicted in all runs.
* correlationWindowSize = 5,
Number of fractions used for rolling pearson correlation
* databaseFilter = {'Organism': ["Human"]},
Filter dict used to find relevant complexes from database. By default,
the CORUM database is filtered based on the column 'Organism' using 'Human' as a search string.
If no filtering is required, pass an empty dict {}.
* databaseIDColumn = "subunits(UniProt IDs)",
* databaseFileName = "20190823_CORUM.txt",
* databaseHasComplexAnnotations = True,
Indicates if the provided database does contain complex annotations. If you have a database with
only pairwise interactions, this setting should be *False*. Clusters are identified by dimensional
reduction and density based clustering (HDBSCAN). In order to alter UMAP and HDBSCAN settings use the
keywords *hdbscanDefaultKwargs* and *umapDefaultKwargs*.
* databaseEntrySplitString = ";",
String by which complex members are separated in the provided database. CORUM = ";", Embl ComplexMap = "|"
* decoySizeFactor = 1.2,
Size factor for creating the decoy database from the positive protein connectivity database such as CORUM.
* grouping = {"WT": ["D3_WT_04.txt","D3_WT_02.txt"],"KO":["D3_KO_01.txt","D3_KO_02.txt"]},
None or dict. Indicates which samples (files) belong to one group. Let's assume 4 files with the names
'KO_01.txt', 'KO_02.txt', 'WT_01.txt' and 'WT_02.txt' are being analysed.
The grouping dict should look like this : {"KO":['KO_01.txt','KO_02.txt'],"WT":['WT_01.txt','WT_02.txt']}
in order to combine them for statistical testing (e.g. t-test of log2 transformed peak-AUCs).
Note that when analysing multiple runs (e.g. grouping present), calling ComplexFinder().run(X) - X must be a
path to a folder containing the files.
When using compTabFormat = True, provide the sample name as <compTabFileName>:<SampleName>.
* hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
* indexIsID = False,
* idColumn = "Uniprot ID",
* interactionProbabCutoff = 0.7
Cutoff for estimator probability. Interactions with probabilities below threshold will be removed.
* keepOnlySignalsValidInAllConditions = False
If True, removes all Signals that were not found to be valid in all files (experiments).
* kFold = 3
Cross validation of classifier optimization.
* justFitAndMatchPeaks = False
If true, the pipeline stops after peak detection/model fitting and matching of peaks (if more than one file is supplied.)
* maxPeaksPerSignal = 15
Number of peaks allowed for one signal profile.
* maxPeakCenterDifference = 1.8
* metrices = ["apex","pearson","euclidean","p_pearson","max_location","umap-dist","rollingCorrelation"], Metrices to access distance between two profiles. Can be either a list of strings and/or dict. In case of a list of dicts, each dict must contain the keywords: 'fn' and 'name' providing a callable function with 'fn' that returns a single floating number and takes two arrays as an input.
* metricesForPrediction = None
* metricQuantileCutoff = 0.90
* minDistanceBetweenTwoPeaks = 3
Distance in fractions (int) between two peaks. Setting this to a smaller number results in more peaks.
* n_jobs = 12,
Number of workers to model peaks, to calculate distance pairs and to train and use the classifer.
* noDatabaseForPredictions = False,
If you want to use ComplexFinder without any database, set this to *True*.
* normValueDict = {},
* noDistanceCalculationAndPrediction = False,
Set to *True* to use ComplexFinder without distance calculation and database prediction.
* peakModel = "GaussianModel",
Indicates which model should be used to model signal profiles. In principle all models from lmfit can be used.
However, the initial parameters are only optimized for GaussianModel and LorentzianModel.
This might affect runtimes dramatically.
* plotSignalProfiles = False,
If True, each profile is plotted against the fractions along with the fitted models.
If you are concerned about time, you might set this to False at the cost of losing visual assessment of the fit quality.
* plotComplexProfiles = False,
* precision = 0.5
Precision to use to filter protein-protein interactions.
If None, the filtering will be performed based on the parameter *interactionProbabCutoff*.
* r2Thresh = 0.85
R2 threshold to accept a model fit. Models below the threshold will be ignored.
* removeSingleDataPointPeaks = True,
* restartAnalysis = False, bool.
Set True if you want to restart the analysis from scratch. If the tmp folder exists, items and dirs will be deleted first.
* retrainClassifier = False,
Even if the trainedClassifier.sav file is found, the classifier is loaded and the training is skipped.
If you change the classifierGridSearch, you should set this to True.
This will ensure that the classifier training is never skipped.
* recalculateDistance = False,
* rollingWinType = None,
If None, all points are evenly weighted. Can be any string of scipy.signal window function.
(https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows)
* runName = None,
* <del>savePeakModels = True</del> *deprecated. always True and will be removed in the next version*.
* scaleRawDataBeforeDimensionalReduction = True,
If raw data should be used (*useRawDataForDimensionalReduction*)
enable this if you want to scale them. Scaling will be performed such that the values of each row are scaled between zero and one.
* smoothSignal = True
Enable/disable smoothing. Defaults to True. A moving average of at least 3 adjacent datapoints is calculated using
pandas rolling function. Affects the analysis time as well as the maximal number of peaks detected.
* smoothWindow = 2,
* topNCorrFeaturesForUMAPAlignment = 200,
Number of profiles used for UMAP Alignment. Only used if useRawDataForDimensionalReduction = True or noDistanceCalculationAndPrediction = True. The Features
will be identified by calculating the Pearson correlation coefficient.
* useRawDataForDimensionalReduction = False, Setting this to True will force the pipeline to use the raw values for dimensional reduction. Distance calculations are not automatically turned off and the output is generated, but they are not used.
* useFWHMForQuant = True
If quantFiles is specified, the FWHM will be used for peak-centric quantification. By default, at least a mean of +/- 1 fraction around the peak center will
be considered (e.g. 3 fractions). However, you can allow single-fraction quantification for narrow peaks by setting 'allowSingleFractionQuant' to True.
* umapDefaultKwargs = {"min_dist":0.0000001,"n_neighbors":3,"n_components":2},
If you want to perform an aligned UMAP, consider altering the parameters alignment_window_size and alignment_regularisation. Find more information here
(https://umap-learn.readthedocs.io/en/latest/aligned_umap_basic_usage.html#aligning-varying-parameters)
* quantFiles = dict
* Quantification files. dict with the name of the co-fraction file as key and the path to the quantification file as value.
Assuming your grouping is something like: {"WT":["WT_01.txt","WT_02.txt"]}. Then the quantification files must
contain a key for each file: something like {"WT_01.txt":"myCoolProject/quant/WT01_quant.txt","WT_02.txt":"myCoolProject/quant/WT02_quant.txt"}.
Assuming the folder myCoolProject/ exists where the main file is.
If analysing a TMT-SILAC experiment it is required to provide TMT labelings for heavy and light peaks separately, the
provided dict should look something like this:
{
"HEAVY_WT_01.txt":"myCoolProject/quant/WT01_quant_heavy.txt",
"LIGHT_WT_01.txt":"myCoolProject/quant/WT01_quant_light.txt"
}
Returns
-------
None
"""
self.params = {
"addImpurity" : addImpurity,
"indexIsID" : indexIsID,
"idColumn" : idColumn,
"n_jobs" : n_jobs,
"kFold" : kFold,
"analysisName" : analysisName,
"restartAnalysis" : restartAnalysis,
"metrices" : metrices,
"peakModel" : peakModel,
"smoothWindow" : smoothWindow,
"classifierClass" : classifierClass,
"retrainClassifier" : retrainClassifier,
"interactionProbabCutoff":interactionProbabCutoff,
"maxPeaksPerSignal" : maxPeaksPerSignal,
"maxPeakCenterDifference" : maxPeakCenterDifference,
"classiferGridSearch" : classiferGridSearch,
"plotSignalProfiles" : plotSignalProfiles,
"savePeakModels" : True, #must be true to process pipeline, depracted, remove from class arguments.
"removeSingleDataPointPeaks" : removeSingleDataPointPeaks,
"grouping" : grouping,
"analysisMode" : analysisMode,
"normValueDict" : normValueDict,
"databaseFilter" : databaseFilter,
"databaseIDColumn" : databaseIDColumn,
"databaseFileName" : databaseFileName,
"databaseHasComplexAnnotations" : databaseHasComplexAnnotations,
"r2Thresh" : r2Thresh,
"smoothSignal" : smoothSignal,
"umapDefaultKwargs" : umapDefaultKwargs,
"hdbscanDefaultKwargs" : hdbscanDefaultKwargs,
"noDatabaseForPredictions" : noDatabaseForPredictions,
"alignRuns" : alignRuns,
"alignMethod" : alignMethod,
"runName" : runName,
"useRawDataForDimensionalReduction" : useRawDataForDimensionalReduction,
"scaleRawDataBeforeDimensionalReduction" : scaleRawDataBeforeDimensionalReduction,
"metricQuantileCutoff": metricQuantileCutoff,
"recalculateDistance" : recalculateDistance,
"metricesForPrediction" : metricesForPrediction,
"minDistanceBetweenTwoPeaks" : minDistanceBetweenTwoPeaks,
"minimumPPsPerFeature" : minimumPPsPerFeature,
"plotComplexProfiles" : plotComplexProfiles,
"decoySizeFactor" : decoySizeFactor,
"classifierTestSize" : classifierTestSize,
"considerOnlyInteractionsPresentInAllRuns" : considerOnlyInteractionsPresentInAllRuns,
"precision" : precision,
"quantFiles" : quantFiles,
"compTabFormat" : compTabFormat,
"correlationWindowSize" : correlationWindowSize,
"takeRondomSampleFromData" : takeRondomSampleFromData,
"minPeakHeightOfMax" : minPeakHeightOfMax,
"justFitAndMatchPeaks" : justFitAndMatchPeaks,
"keepOnlySignalsValidInAllConditions" : keepOnlySignalsValidInAllConditions,
"noDistanceCalculationAndPrediction" : noDistanceCalculationAndPrediction,
"topNCorrFeaturesForUMAPAlignment" : topNCorrFeaturesForUMAPAlignment,
"databaseEntrySplitString": databaseEntrySplitString,
"version" : __VERSION__,
"usePeakCentricFeatures" : usePeakCentricFeatures,
"allowSingleFractionQuant" : allowSingleFractionQuant,
"useFWHMForQuant" : useFWHMForQuant,
"TMTPoolMethod" : TMTPoolMethod,
"transformQuantDataBy" : transformQuantDataBy
}
print("\n" + str(self.params))
self._checkParameterInput()
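# Example (sketch; file and folder names are illustrative):
#     cf = ComplexFinder(analysisName = "exampleRun",
#                        grouping = {"WT" : ["WT_01.txt","WT_02.txt"]})
#     cf.run("path/to/folder/with/coelution/txt/files")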
def _addMetricesToDB(self,analysisName):
"""
Adds distance metrices to the database entries
that were found in the co-elution profiles.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping matching metrices to DB.")
return
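# expand the pseudo-metric "signalDiff" into one per-fraction difference metric
# ("0-diff", "1-diff", ...) matching the number of fractions in this analysis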
if "signalDiff" in self.params["metrices"]:
self.params["metrices"] = [x for x in self.params["metrices"] if x != "signalDiff"] + ["{}-diff".format(x) for x in np.arange(self.Xs[analysisName].columns.size)]
metricColumns = self.params["metrices"]
if not self.params["noDatabaseForPredictions"]:
self.DB.matchMetrices(self.params["pathToTmp"][analysisName],entriesInChunks[analysisName],metricColumns,analysisName,forceRematch=self.params["recalculateDistance"])
def _addMetricToStats(self,metricName, value):
"""
Adds a metric to the stats data frame.
Does not check if the metric is already present; if it is,
the value will just be overwritten.
Parameters
----------
metricName str
Name of metric to add
value str
Value of metric
Returns
-------
None
"""
if metricName in self.stats.columns:
self.stats.loc[self.currentAnalysisName,metricName] = value
def _addModelToSignals(self,signalModels):
"""
Adds fitted models to Signals. If no valid
model was found, the signal profile is removed.
Parameters
----------
signalModels - list
List of modelfits (dict)
Returns
-------
None
"""
for fitModel in signalModels:
modelID = fitModel["id"]
if len(fitModel) == 1:
del self.Signals[self.currentAnalysisName][modelID]
if modelID in self.Signals[self.currentAnalysisName]:
for k,v in fitModel.items():
if k != 'id':
setattr(self.Signals[self.currentAnalysisName][modelID],k,v)
self.Signals[self.currentAnalysisName][modelID].saveResults()
def _attachQuantificationDetails(self, combinedPeakModels = None):
"""
"""
if self.params["analysisMode"] == "label-free":
if len(self.params["quantFiles"]) != 0:
print("Warning :: Quant files have been specified but anaylsis mode is label-free. Please define SILAC or TMT or SILAC-TMT")
print("Info :: Label-free mode selected. No additation quantification performed..")
return
if len(self.params["quantFiles"]) > 0:
files = np.array(list(self.params["grouping"].values())).flatten()
print(files)
print(self.params["quantFiles"].keys())
if len(self.params["quantFiles"]) != files.size and self.params["analysisMode"] != "SILAC-TMT":
print("Warning :: Different number of quantFiles and groupings provided.")
if self.params["analysisMode"] != "SILAC-TMT":
initFilesFound = [k for k in self.params["quantFiles"].keys() if k in files]
else:
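# SILAC-TMT quant files are keyed as 'HEAVY_<file>' / 'LIGHT_<file>'; strip the prefix to match the co-elution file names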
print(self.params["quantFiles"])
for k in self.params["quantFiles"].keys():
print(k.split("HEAVY_",maxsplit=1))
initFilesFound = [k for k in self.params["quantFiles"].keys() if k.split("HEAVY_",maxsplit=1)[-1] in files or k.split("LIGHT_",maxsplit=1)[-1] in files]
print("Info :: For the following files and correpsonding co-elution profile data was detected")
print(initFilesFound)
print("Warning :: other files will be ignored.")
# elif self.params["analysisMode"] == "SILAC-TMT":
# if not all(f.startswith("HEAVY") or f.startswith("LIGHT") for f in self.params["quantFiles"].keys()):
# print("Warning :: If using a SILAC-TMT experiment, please provide 'HEAVY' and 'LIGHT' before the file in the dict 'quantFile' such as 'HEAVY_WT_01.txt':<path to quant file> as well as 'LIGHT_WT_01.txt':<path to quant file>")
print("combining Peaks!!")
if combinedPeakModels is None:
## load combined peak results
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResults.txt")
if os.path.exists(txtOutput):
combinedPeakModels = pd.read_csv(txtOutput,sep="\t")
else:
print("Warning :: Combined peak model reuslts not found. Deleted? Skipping peak centric quantification.")
return
print("Info :: Starting peak centric quantification. In total {} peaks were found".format(combinedPeakModels.index.size))
print("Info :: Loading quantification files.")
if not all(os.path.exists(pathToQuantFile) for pathToQuantFile in self.params["quantFiles"].values()):
print("Warning :: Not all quant files found!")
if self.params["analysisMode"] != "SILAC-TMT":
print(self.params["quantFiles"].values())
path = list(self.params["quantFiles"].values())
print(os.path.abspath(path[0]))
quantFilesLoaded = [(k,pd.read_csv(v,sep="\t",index_col = 0),False) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
else:
quantFilesLoaded = [(k.split("HEAVY_",maxsplit=1)[-1] if "HEAVY" in k else k.split("LIGHT_",maxsplit=1)[-1],pd.read_csv(v,sep="\t",index_col = 0),"LIGHT" in k) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
if len(quantFilesLoaded) == 0:
print("Warning :: No quant files found. Skipping peak-centric quantification.")
return
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: Assuming one SILAC ratio per fraction .")
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: Assuming the following order:")
print("Ignoring column headers, just uses the column index as follow..")
print("Fraction 1 - TMT reporter 1, Fraction 1 - TMT reporter 2, Faction 2 - TMT reporter 3 .... Fraction 2 - TMT reporter 1")
extractedQuantFiles = []
for k,quantFile,isLightQuantData in quantFilesLoaded:
print("Info :: Quantification of ", k)
centerColumnName = "Center_{}".format(k)
fwhmColumnName = "fwhm_{}".format(k)
quantFileName = "Q({})".format(k)
combinedPeakModelsFiltered = combinedPeakModels.dropna(subset=[centerColumnName])
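# integration window per peak: center +/- FWHM/1.7 (slightly wider than the full width at half maximum)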
lowerBound = combinedPeakModelsFiltered[centerColumnName] - combinedPeakModelsFiltered[fwhmColumnName]/1.7
upperBound = combinedPeakModelsFiltered[centerColumnName] + combinedPeakModelsFiltered[fwhmColumnName]/1.7
peakBounds = np.concatenate([lowerBound.values.reshape(-1,1),upperBound.values.reshape(-1,1)],axis=1)
peakBounds[:,1] += 1 #add one extra to use bounds as a range in python
#check bounds
peakBounds[peakBounds[:,0] < 0, 0] = 0
peakBounds[peakBounds[:,1] >= quantFile.columns.size, 1] = quantFile.columns.size - 1
#transform bounds to ints
peakBounds = np.around(peakBounds,0).astype(np.int64)
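# quant rows are selected via each peak's parent feature key, so they stay aligned with peakBounds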
quantData = quantFile.loc[combinedPeakModelsFiltered["Key"].values].values
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: extracting mean from file {}.".format(k))
out = extractMeanByBounds(
NPeakModels = combinedPeakModelsFiltered.index.size,
peakBounds = peakBounds,
quantData = quantData
)
quantColumnNames = ["SILAC({})_Mean".format(quantFileName),"SILAC({})_Error".format(quantFileName)]
print(out)
print(quantColumnNames)
dfResult = | pd.DataFrame(out,index=combinedPeakModelsFiltered.index, columns = quantColumnNames) | pandas.DataFrame |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure we don't keep NaN entries
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if include_no_data is specified, both are selected
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure we don't keep NaN entries
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if include_no_data is specified, both are selected
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure we don't keep NaN entries
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if include_no_data is specified, both are selected
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure we don't keep NaN entries
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if include_no_data is specified, both are selected
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure we don't keep NaN entries
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if include_no_data is specified, both are selected
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
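# per the assertions below: each weight within the (0.3, 0.5) bounds and total weight of 0.95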
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# returns the 2 securities we have if all_or_none is false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# returns no securities if all_or_none is true
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
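# per the assertions below, the excess above the 0.5 cap is redistributed to the other assets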
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
# set existing weights
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
# clear out temp - same as would Strategy
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
# clear out temp - same as would Strategy
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_not():
target = mock.MagicMock()
target.temp = {}
# notAlgo runs on every date except 2018-01-02
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
notAlgo = algos.Not(runOnDateAlgo)
target.now = pd.to_datetime('2018-01-01')
assert notAlgo(target)
target.now = pd.to_datetime('2018-01-02')
assert not notAlgo(target)
def test_or():
target = mock.MagicMock()
target.temp = {}
# algos that run on 2018-01-02, 2018-01-03 and 2018-01-04 respectively
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
runOnDateAlgo2 = algos.RunOnDate(pd.to_datetime('2018-01-03'))
runOnDateAlgo3 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
runOnDateAlgo4 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
orAlgo = algos.Or([runOnDateAlgo, runOnDateAlgo2, runOnDateAlgo3, runOnDateAlgo4])
# verify it returns false when none of the algos return true
target.now = pd.to_datetime('2018-01-01')
assert not orAlgo(target)
# verify it returns true when the first is true
target.now = pd.to_datetime('2018-01-02')
assert orAlgo(target)
# verify it returns true when the second is true
target.now = pd.to_datetime('2018-01-03')
assert orAlgo(target)
# verify it returns true when both algos return true
target.now = pd.to_datetime('2018-01-04')
assert orAlgo(target)
def test_TargetVol():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=7)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data.loc[dts[0],'c1'] = 95
data.loc[dts[1],'c1'] = 105
data.loc[dts[2],'c1'] = 95
data.loc[dts[3],'c1'] = 105
data.loc[dts[4],'c1'] = 95
data.loc[dts[5],'c1'] = 105
data.loc[dts[6],'c1'] = 95
# low vol c2
data.loc[dts[0], 'c2'] = 99
data.loc[dts[1], 'c2'] = 101
data.loc[dts[2], 'c2'] = 99
data.loc[dts[3], 'c2'] = 101
data.loc[dts[4], 'c2'] = 99
data.loc[dts[5], 'c2'] = 101
data.loc[dts[6], 'c2'] = 99
targetVolAlgo = algos.TargetVol(
0.1,
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=1
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1':0.5, 'c2':0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'],weights['c1'])
unannualized_c2_weight = weights['c1']
targetVolAlgo = algos.TargetVol(
0.1*np.sqrt(252),
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'], weights['c1'])
assert np.isclose(unannualized_c2_weight, weights['c2'])
def test_PTE_Rebalance():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=30*4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
# low vol c2
for i,dt in enumerate(dts[:-2]):
if i % 2 == 0:
data.loc[dt,'c1'] = 95
data.loc[dt,'c2'] = 101
else:
data.loc[dt, 'c1'] = 105
data.loc[dt, 'c2'] = 99
dt = dts[-2]
data.loc[dt,'c1'] = 115
data.loc[dt,'c2'] = 97
s.setup(data)
s.update(dts[-2])
s.adjust(1000000)
s.rebalance(0.4,'c1')
s.rebalance(0.6,'c2')
wdf = pd.DataFrame(
np.zeros(data.shape),
columns=data.columns,
index=data.index
)
wdf['c1'] = 0.5
wdf['c2'] = 0.5
PTE_rebalance_Algo = bt.algos.PTE_Rebalance(
0.01,
wdf,
lookback= | pd.DateOffset(months=3) | pandas.DateOffset |
#!/usr/bin/python -u
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import os
SEED = 123
random.seed(SEED)
np.random.seed(SEED)
# +
#Load the train and test files with pchembl values (used for end-to-end deep learning)
train_with_label_df = pd.read_csv("../data/Train_Compound_Viral_interactions_for_Supervised_Learning.csv",header='infer')
test_with_label_df = pd.read_csv("../data/Test_Compound_Viral_interactions_for_Supervised_Learning.csv",header='infer')
print(train_with_label_df.shape)
print(test_with_label_df.shape)
#Load the train and test embedding representation for compounds
train_compound_df = pd.read_csv("../data/Train_Compound_LS.csv",header='infer')
test_compound_df = pd.read_csv("../data/Test_Compound_LS.csv",header='infer')
print(train_compound_df.shape)
print(test_compound_df.shape)
#Load the train and test morgan fingerprint representation for compounds
train_compound_mfp_df = pd.read_csv("../data/Train_Compound_MFP.csv",header='infer')
test_compound_mfp_df = pd.read_csv("../data/Test_Compound_MFP.csv",header='infer')
print(train_compound_mfp_df.shape)
print(test_compound_mfp_df.shape)
#Load the train and test embedding representation for proteins
train_protein_df = | pd.read_csv("../data/Train_Protein_LS.csv",header=None) | pandas.read_csv |
import os
from src.corpus.brat_writer import write_file
from typing import Dict, List
import pandas as pd
class DocumentMerger:
def __init__(
self,
ent_id2label,
rel_id2label,
true_doc_tokens: Dict[str, List[List[str]]],
save_dir="val"
) -> None:
super().__init__()
self.pred_doc_bins = {}
self.merged_docs = None
self.true_doc_tokens = true_doc_tokens
self.ent_id2label = ent_id2label
self.rel_id2label = rel_id2label
self.save_dir = save_dir
def load_partitions(
self,
tokens2d: List[List[str]],
ents: pd.DataFrame,
rels: pd.DataFrame,
orig_doc_key: str,
partition_key: int,
):
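"""Store one predicted partition (its tokens, entities and relations) under its
original document key so the partitions can later be merged."""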
doc = dict(
partition_key=partition_key,
tokens2d=tokens2d,
ents=ents,
rels=rels
)
if orig_doc_key in self.pred_doc_bins:
self.pred_doc_bins[orig_doc_key].append(doc)
else:
self.pred_doc_bins[orig_doc_key] = [doc]
def merge_partitions(self, key, docs):
"""Iteratively, merge document partitions (defined by their predicted
`ents` and `rels`) into a doc based on their `orig_doc`. In other words,
ents and rels having the same `orig_doc` will be combined.
Args:
ents (pd.DataFrame): [description]
rels (pd.DataFrame): [description]
doc_key (str): [description]
"""
tokens2d = []
entities = pd.DataFrame(columns=['start', 'end', 'step_idx', 'label'])
relations = | pd.DataFrame() | pandas.DataFrame |
# author: <NAME>, <NAME>
# date: 2021-11-25
"""This script takes two file paths. It takes in the input path which includes the clean train and test data
and the output directory to store the results in. It performs machine learning analysis.
This script will have 4 outputs: 3 tables and 1 figure.
Usage: src/machine_learning.py --input_path_train=<input_path_train> --input_path_test=<input_path_test> --output_dir=<output_dir>
Options:
--input_path_train=<input_path_train> The path including filename to the clean training data (csv file)
--input_path_test=<input_path_test> The path including filename to the clean test data (csv file)
--output_dir=<output_dir> The directory to store the resulting tables, figures, and model
"""
from docopt import docopt
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import RandomizedSearchCV, cross_validate
from sklearn.metrics import ConfusionMatrixDisplay
opt = docopt(__doc__)
def main(input_path_train, input_path_test, output_dir):
# input_path_train = "data/processed/train_df.csv"
# input_path_test = "data/processed/test_df.csv"
# output_dir = "results"
# read in data
train_df = pd.read_csv(input_path_train)
test_df = pd.read_csv(input_path_test)
# combine classes to deal with the issue of under-populated classes
train_df.loc[train_df["quality"] == 3, "quality"] = 4
train_df.loc[train_df["quality"] == 9, "quality"] = 8
train_df["quality"] = train_df["quality"].map(str)
test_df.loc[test_df["quality"] == 3, "quality"] = 4
test_df.loc[test_df["quality"] == 9, "quality"] = 8
test_df["quality"] = test_df["quality"].map(str)
# split into X_train/y_train and X_test/y_test
X_train, y_train = train_df.drop(columns=["quality"]), train_df["quality"]
X_test, y_test = test_df.drop(columns=["quality"]), test_df["quality"]
# define the transformer
numeric_features = list(X_train.select_dtypes(include="number").columns)
categorical_features = ["type"]
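# scale numeric features and one-hot encode the categorical wine type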
preprocessor = make_column_transformer(
(StandardScaler(), numeric_features),
(OneHotEncoder(handle_unknown="ignore", sparse=False), categorical_features),
)
# models
dummy = make_pipeline(
preprocessor, DummyClassifier(strategy="stratified", random_state=123)
)
svc = make_pipeline(
preprocessor, SVC(kernel="linear", probability=True, random_state=123)
)
lr = make_pipeline(
preprocessor, LogisticRegression(max_iter=5000, random_state=123)
)
rf = make_pipeline(preprocessor, RandomForestClassifier(random_state=123))
models = {}
models["Dummy"] = dummy
models["SVC"] = svc
models["Logistic Regression"] = lr
models["Random Forest"] = rf
# scoring metrics
scoring = ["accuracy", "f1_macro", "roc_auc_ovr", "roc_auc_ovo"]
# perform cross_validation
results = {}
for key, value in models.items():
results[key] = mean_std_cross_val_scores(
value,
X_train,
y_train,
return_train_score=True,
scoring=scoring,
error_score="raise",
)
cross_val_results = (
pd.DataFrame(results).rename(index={"test_score": "cross_validation_score"}).T
)
# hyperparameter optimization for the best model (random forest classifier)
param_grid_rf = {
"randomforestclassifier__n_estimators": np.logspace(1, 4, 10, dtype=int),
"randomforestclassifier__max_depth": np.linspace(1, 30, 10, dtype=int),
}
search_rf = RandomizedSearchCV(
rf,
param_distributions=param_grid_rf,
return_train_score=True,
n_jobs=2,
n_iter=30,
cv=5,
random_state=123,
scoring="roc_auc_ovr",
error_score="raise",
)
search_rf.fit(X_train, y_train)
# results of random forest
rf_results = {
"Random Forest Best n_estimators": search_rf.best_params_[
"randomforestclassifier__n_estimators"
],
"Random Forest Best max_depth": search_rf.best_params_[
"randomforestclassifier__max_depth"
],
"Random Forest Best Validation Score": search_rf.best_score_,
"Random Forest Roc_Auc Test Score": search_rf.best_estimator_.score(
X_test, y_test
),
}
rf_results = pd.DataFrame(rf_results, index=[0])
# confusion matrix
test_cm = ConfusionMatrixDisplay.from_estimator(
search_rf.best_estimator_, X_test, y_test, display_labels=['<=4', '5', '6', '7', '>=8']
)
# feature importances
categorical_columns = list(
search_rf.best_estimator_.named_steps["columntransformer"]
.named_transformers_["onehotencoder"]
.get_feature_names_out()
)
all_columns = numeric_features + categorical_columns
feature_importances = pd.DataFrame(
search_rf.best_estimator_.named_steps[
"randomforestclassifier"
].feature_importances_,
index=all_columns,
columns=["Feature Importances"],
).sort_values(by="Feature Importances", ascending=False).drop(index='Unnamed: 0')
# export tables, figures and model
try:
cross_val_results.to_csv(f"{output_dir}/cross_val_results.csv")
except:
os.makedirs(os.path.dirname(f"{output_dir}/cross_val_results.csv"))
cross_val_results.to_csv(f"{output_dir}/cross_val_results.csv")
try:
rf_results.to_csv(f"{output_dir}/random_forest_results.csv")
except:
os.makedirs(os.path.dirname(f"{output_dir}/random_forest_results.csv"))
rf_results.to_csv(f"{output_dir}/random_forest_results.csv")
try:
feature_importances.to_csv(f"{output_dir}/feature_importances.csv")
except:
os.makedirs(os.path.dirname(f"{output_dir}/feature_importances.csv"))
feature_importances.to_csv(f"{output_dir}/feature_importances.csv")
try:
plt.savefig(f"{output_dir}/test_cm.png")
except:
os.makedirs(os.path.dirname(f"{output_dir}/test_cm_png"))
plt.savefig(f"{output_dir}/test_cm.png")
try:
pickle.dump(search_rf.best_estimator_, open(f"{output_dir}/random_forest.rds", "wb"))
except:
os.makedirs(os.path.dirname(f"{output_dir}/random_forest.rds"))
pickle.dump(search_rf.best_estimator_, open(f"{output_dir}/random_forest.rds", "wb"))
# helper function, adapted from 573 lecture 4
# https://pages.github.ubc.ca/mds-2021-22/DSCI_573_feat-model-select_students/lectures/04_feat-importances-selection.html
def mean_std_cross_val_scores(model, X_train, y_train, **kwargs):
"""
Returns mean and std of cross validation
Parameters
----------
model :
scikit-learn model
X_train : numpy array or pandas DataFrame
X in the training data
y_train :
y in the training data
Returns
----------
pandas Series with mean scores from cross_validation
"""
scores = cross_validate(model, X_train, y_train, **kwargs, n_jobs=2)
mean_scores = pd.DataFrame(scores).mean()
std_scores = pd.DataFrame(scores).std()
out_col = []
for i in range(len(mean_scores)):
out_col.append("%0.3f (+/- %0.3f)" % (mean_scores[i], std_scores[i]))
return | pd.Series(data=out_col, index=mean_scores.index) | pandas.Series |
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import yfinance as yf
from pandas_datareader import data as web
import datetime as dt
from empyrical import*
import quantstats as qs
from darts.models import*
from darts import TimeSeries
from darts.utils.missing_values import fill_missing_values
from darts.metrics import mape
import yahoo_fin.stock_info as si
from yahoofinancials import YahooFinancials
from pypfopt import EfficientFrontier, risk_models, expected_returns, HRPOpt, objective_functions
import logging
import warnings
from warnings import filterwarnings
from IPython.display import display
import copy
# ------------------------------------------------------------------------------------------
today = dt.date.today()
# ------------------------------------------------------------------------------------------
class Engine:
def __init__(self,start_date, portfolio, weights=None, rebalance=None, benchmark=['SPY'], end_date=today, optimizer=None, max_vol=0.15):
self.start_date = start_date
self.end_date = end_date
self.portfolio = portfolio
self.weights = weights
self.benchmark = benchmark
self.optimizer = optimizer
self.max_vol = max_vol
self.rebalance = rebalance
if self.weights==None:
self.weights = [1.0/len(self.portfolio)]*len(self.portfolio)
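# optimizer shorthands (helper functions defined elsewhere in this module):
# 'EF' = efficient frontier, 'MV' = mean-variance capped at max_vol, 'HRP' = hierarchical risk parity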
if self.optimizer=="EF":
self.weights = efficient_frontier(self, perf="False")
if self.optimizer=="MV":
self.weights = mean_var(self, vol_max=max_vol, perf="False")
if self.optimizer=="HRP":
self.weights = hrp(self, perf="False")
if self.rebalance!=None:
self.rebalance = make_rebalance(self.start_date, self.end_date, self.optimizer, self.portfolio, self.rebalance)
#-------------------------------------------------------------------------------------------
def get_returns(stocks,wts, start_date, end_date=today):
if len(stocks) > 1:
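# multi-asset case: compute daily returns, weight them, and sum across assets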
assets = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
ret_data = assets.pct_change()[1:]
returns = (ret_data * wts).sum(axis = 1)
return returns
else:
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
df = pd.DataFrame(df)
returns = df.pct_change()
return returns
# ------------------------------------------------------------------------------------------
def get_pricing(stocks, start_date, end_date=today, pricing="Adj Close", wts=1):
if len(stocks) > 1:
assets = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)[pricing]
return assets
else:
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)[pricing]
return df
# ------------------------------------------------------------------------------------------
def get_data(stocks, period="max", trading_year_days=252):
p = {"period": period}
for stock in stocks:
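# map each period string to the number of trading-day rows to keep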
years = {
'1mo' : math.ceil(trading_year_days/12),
'3mo' : math.ceil(trading_year_days/4),
'6mo' : math.ceil(trading_year_days/2),
'1y': trading_year_days,
'2y' : 2*trading_year_days,
'5y' : 5*trading_year_days,
'10y' : 10*trading_year_days,
'20y' : 20*trading_year_days,
'max' : len(yf.Ticker(stock).history(**p)['Close'].pct_change())
}
df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= today)
df = pd.DataFrame(df)
df = df.tail(years[period])
df = pd.DataFrame(df)
df = df.drop(['Adj Close'], axis=1)
df = df[["Open", "High", "Low", "Close", "Volume"]]
return df
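# Example call of get_data (tickers are illustrative assumptions):
# get_data(["AAPL", "MSFT"], period="1y") returns the OHLCV frame trimmed to
# roughly the last 252 trading days.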
# ------------------------------------------------------------------------------------------
#reformat
def creturns(stocks,wts=1, period="max", benchmark= None, plot=True, pricing="Adj Close", trading_year_days=252, end_date = today):
p = {"period": period}
for stock in stocks:
years = {
'1mo' : math.ceil(trading_year_days/12),
'3mo' : math.ceil(trading_year_days/4),
'6mo' : math.ceil(trading_year_days/2),
'1y': trading_year_days,
'2y' : 2*trading_year_days,
'5y' : 5*trading_year_days,
'10y' : 10*trading_year_days,
'20y' : 20*trading_year_days,
'max' : len(yf.Ticker(stock).history(**p)['Close'].pct_change())
}
if len(stocks) > 1:
df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= end_date)[pricing]
if benchmark != None:
df2 = web.DataReader(benchmark, data_source='yahoo', start = "1980-01-01", end= end_date)[pricing]
df = pd.DataFrame(df)
df = df.tail(years[period])
df2 = df2.tail(years[period])
return_df2 = df2.pct_change()[1:]
ret_data = df.pct_change()[1:]
ret_data = (ret_data + 1).cumprod()
port_ret = (ret_data * wts).sum(axis = 1)
return_df2 = (return_df2 + 1).cumprod()
ret_data['Portfolio'] = port_ret
ret_data['Benchmark'] = return_df2
ret_data = pd.DataFrame(ret_data)
else:
df = pd.DataFrame(df)
df = df.tail(years[period])
ret_data = df.pct_change()[1:]
ret_data = (ret_data + 1).cumprod()
port_ret = (ret_data * wts).sum(axis = 1)
ret_data['Portfolio'] = port_ret
ret_data = | pd.DataFrame(ret_data) | pandas.DataFrame |
import sys
import csv
import numpy as np
import gpflow
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import pandas as pd
import h5py
from sklearn.model_selection import train_test_split
import tensorflow as tf
from scipy.cluster.vq import kmeans
tf.set_random_seed(1234)
import pickle
def loaddata(subID, whichModel='PSwitch'):
'''
Loads data for subject-level modeling
'''
data = h5py.File('penaltykickdata.h5','r')
subID1HE = np.array(data.get('subID')).astype('float32')
otherdata = np.array(data.get('otherfeatures')).astype('float32')
switchBool = np.array(data.get('targets')).astype('float32') #did they switch at time t+1
trialidx = np.array(data.get('trialidx')).astype('float32')
time = np.array(data.get('time')).astype('int32')
if whichModel == 'PSwitch':
targets = np.array(data.get('targets')).astype('float32')
elif whichModel == 'ExtraEV':
targets = np.array(data.get('EVtargets').value).astype('int32')
otherdata = np.hstack((otherdata, switchBool))
Xfeatures_totaldata = np.hstack((otherdata, subID1HE))
Xfeatures_totaldata = pd.DataFrame(Xfeatures_totaldata)
offset = otherdata.shape[1]
subdata = Xfeatures_totaldata[Xfeatures_totaldata[offset+subID]==1]
subtargets = | pd.DataFrame(targets) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from six import StringIO
from dae.tools.generate_histogram import (
ScoreHistogramInfo,
GenerateScoresHistograms,
)
# pytestmark = pytest.mark.xfail
class MyStringIO(StringIO):
def __add__(self, other):
return ""
@pytest.fixture
def score_files():
score = pd.DataFrame({"SCORE": [1, 2, 3, 4, 4, 5, 6]})
rankscore = pd.DataFrame({"RANKSCORE": [1, 10, 100, 100, 1000, 10000]})
rankscore_zero_start = pd.DataFrame(
{"RANKSCORE_0": [0, 1, 10, 100, 100, 1000, 10000]}
)
return [score, rankscore, rankscore_zero_start]
@pytest.fixture
def score_files_by_chunks():
score = [
pd.DataFrame({"SCORE": [1, 2, 3]}),
pd.DataFrame({"SCORE": [4, 4, 5]}),
pd.DataFrame({"SCORE": [6]}),
]
rankscore = [
pd.DataFrame({"RANKSCORE": [1, 10, 100]}),
pd.DataFrame({"RANKSCORE": [100, 1000, 10000]}),
pd.DataFrame({"RANKSCORE": [1000000]}),
]
rankscore_zero_start = [
pd.DataFrame({"RANKSCORE_0": [0, 1, 10]}),
pd.DataFrame({"RANKSCORE_0": [100, 100, 1000]}),
pd.DataFrame({"RANKSCORE_0": [10000]}),
]
return [
score,
score,
rankscore,
rankscore_zero_start,
rankscore_zero_start,
]
@pytest.fixture
def score_files_with_start_end():
score = [
pd.DataFrame(
{"SCORE": [1, 2, 3], "start": [5, 10, 20], "end": [5, 11, 23]}
),
pd.DataFrame(
{"SCORE": [4, 4, 5], "start": [5, 10, 20], "end": [5, 11, 23]}
),
pd.DataFrame({"SCORE": [6], "start": [100], "end": [100]}),
]
rankscore = [
pd.DataFrame(
{
"RANKSCORE": [1, 10, 100],
"start": [5, 10, 20],
"end": [5, 10, 20],
}
),
pd.DataFrame(
{
"RANKSCORE": [100, 1000, 10000],
"start": [5, 10, 20],
"end": [5, 10, 20],
}
),
pd.DataFrame({"RANKSCORE": [1000000], "start": [100], "end": [100]}),
]
rankscore_zero_start = [
pd.DataFrame(
{
"RANKSCORE_0": [0, 1, 10],
"start": [5, 10, 20],
"end": [5, 10, 20],
}
),
pd.DataFrame(
{
"RANKSCORE_0": [100, 100, 1000],
"start": [5, 10, 20],
"end": [5, 10, 20],
}
),
| pd.DataFrame({"RANKSCORE_0": [10000], "start": [100], "end": [100]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""data_augmentation.py
# Notebook: Generate Training Dataset
In this notebook, we simulate a training dataset from the real-world dataset. There are two steps in making such data:
* 1) Create pairs of trajectories from the original set
* 2) Create a label per pair of trajectories
# Required packages
"""
import numpy as np
from numpy import save, load
import pandas as pd
import matplotlib.pyplot as plt
import glob
"""# Define functions"""
def read_data(path, format, n, min_len):
data_list=[]
c=0
for file in glob.glob(path+format):
#print(file)
try:
            df = pd.read_csv(file, header=None, sep=',|;', engine='python')  # regex separator requires the python engine
        except Exception:
            df = pd.DataFrame()
if ((len(df)>min_len)):
data_list.append(df)
c = c + 1
if c >= n:
break
dataset=pd.concat(data_list)
dataset.columns=['id', 'date', 'lng', 'lat']
dataset=dataset.reset_index(drop=True)
print ('Step1: Read files')
return dataset
def linux_TS(df):
df.date=pd.to_datetime(df.date)
df['TS'] = (pd.DatetimeIndex(df.date-pd.Timedelta('02:00:00')).astype(np.int64) // 10**9)
return df
def min_drop_func(df, min_len):
data_list=[]
ids=np.unique(df.id)
for i in ids:
temp=df[df.id==i]
if len (temp) > min_len:
data_list.append(temp)
data_list = | pd.concat(data_list) | pandas.concat |
import numpy as np
import pandas as pd
import os
# http://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
# Load .csv file
path = 'german/german_final.csv'
data = | pd.read_csv(path, header=None) | pandas.read_csv |
# coding: utf-8
"""
Aurelio_Amerio_Higgs_v4.py
In this analysis, I have used several MLP models, applied to the Kaggle Higgs dataset,
in order to distinguish signal from noise.
----------------------------------------------------------------------
author: <NAME> (<EMAIL>)
Student ID: QT08313
Date: 03/08/2018
----------------------------------------------------------------------
"""
# # Import data and preprocess it
# In[49]:
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
use_all_features = True
use_full_data = True
test_sml_size = 3000
# In[50]:
#file paths
train_sig_path_sml = "data/train_sml_sig.csv"
train_bg_path_sml = "data/train_sml_bg.csv"
train_sig_path = "data/train_sig.csv"
train_bg_path = "data/train_bg.csv"
test_sig_path = "data/test_sig.csv"
test_bg_path = "data/test_bg.csv"
#read csv
train_sig_sml = | pd.read_csv(train_sig_path_sml, header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: | algos.value_counts(s, bins=1) | pandas.core.algorithms.value_counts |
import numpy as np
import pandas as pd
import pytest
from sid.shared import boolean_choices
from src.create_initial_states.create_initial_immunity import (
_calculate_endog_immunity_prob,
)
from src.create_initial_states.create_initial_immunity import (
_calculate_exog_immunity_prob,
)
from src.create_initial_states.create_initial_immunity import (
_calculate_total_immunity_prob,
)
from src.create_initial_states.create_initial_immunity import create_initial_immunity
def date(month, day):
return pd.Timestamp(f"2020-{month:0d}-{day:0d}")
@pytest.fixture
def empirical_infections():
df = pd.DataFrame()
df["date"] = [date(3, 1), date(3, 1), date(3, 3), date(3, 5), date(3, 5)]
df["county"] = ["A", "A", "A", "B", "B"]
df["age_group_rki"] = ["young", "old", "young", "young", "old"]
df["newly_infected"] = [1, 1, 2, 2, 2]
return df.set_index(["date", "county", "age_group_rki"])["newly_infected"]
@pytest.fixture
def synthetic_data():
df = pd.DataFrame()
df["county"] = ["A"] * 5 + ["B"] * 5
df["age_group_rki"] = ["young"] * 3 + ["old"] * 2 + ["young"] * 2 + ["old"] * 3
return df
def test_calculate_total_immunity_prob(synthetic_data):
total_immunity = pd.DataFrame()
total_immunity["county"] = list("AABB")
total_immunity["age_group_rki"] = ["old", "young"] * 2
total_immunity["cases"] = [2, 6, 4, 4]
total_immunity = total_immunity.set_index(["age_group_rki", "county"])["cases"]
undetected_multiplier = 2
population_size = 100
expected = pd.DataFrame()
expected["county"] = list("AABB")
expected["age_group_rki"] = ["old", "young"] * 2
expected["prob"] = [4 / 20, 12 / 30, 8 / 30, 8 / 20]
expected = expected.set_index(["age_group_rki", "county"])["prob"]
res = _calculate_total_immunity_prob(
total_immunity=undetected_multiplier * total_immunity,
synthetic_data=synthetic_data,
population_size=population_size,
)
pd.testing.assert_series_equal(
res.sort_index(), expected.sort_index(), check_names=False
)
def test_calculate_endog_immunity_prob(synthetic_data):
initial_infections = | pd.DataFrame() | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import pandas
from modin.pandas.utils import to_pandas
import modin.pandas as pd
from pathlib import Path
import pyarrow as pa
import os
import sys
from .utils import df_equals
# needed to resolve ray-project/ray#3744
pa.__version__ = "0.11.0"
pd.DEFAULT_NPARTITIONS = 4
PY2 = sys.version_info[0] == 2
TEST_PARQUET_FILENAME = "test.parquet"
TEST_CSV_FILENAME = "test.csv"
TEST_JSON_FILENAME = "test.json"
TEST_HTML_FILENAME = "test.html"
TEST_EXCEL_FILENAME = "test.xlsx"
TEST_FEATHER_FILENAME = "test.feather"
TEST_READ_HDF_FILENAME = "test.hdf"
TEST_WRITE_HDF_FILENAME_MODIN = "test_write_modin.hdf"
TEST_WRITE_HDF_FILENAME_PANDAS = "test_write_pandas.hdf"
TEST_MSGPACK_FILENAME = "test.msg"
TEST_STATA_FILENAME = "test.dta"
TEST_PICKLE_FILENAME = "test.pkl"
TEST_SAS_FILENAME = os.getcwd() + "/data/test1.sas7bdat"
TEST_FWF_FILENAME = "test_fwf.txt"
TEST_GBQ_FILENAME = "test_gbq."
SMALL_ROW_SIZE = 2000
def modin_df_equals_pandas(modin_df, pandas_df):
return to_pandas(modin_df).sort_index().equals(pandas_df.sort_index())
def setup_parquet_file(row_size, force=False):
if os.path.exists(TEST_PARQUET_FILENAME) and not force:
pass
else:
pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
).to_parquet(TEST_PARQUET_FILENAME)
def create_test_ray_dataframe():
df = pd.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
def create_test_pandas_dataframe():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
@pytest.fixture
def test_files_eq(path1, path2):
with open(path1, "rb") as file1, open(path2, "rb") as file2:
file1_content = file1.read()
file2_content = file2.read()
        return file1_content == file2_content
def teardown_test_file(test_path):
if os.path.exists(test_path):
os.remove(test_path)
def teardown_parquet_file():
if os.path.exists(TEST_PARQUET_FILENAME):
os.remove(TEST_PARQUET_FILENAME)
@pytest.fixture
def make_csv_file():
"""Pytest fixture factory that makes temp csv files for testing.
Yields:
Function that generates csv files
"""
filenames = []
def _make_csv_file(
filename=TEST_CSV_FILENAME,
row_size=SMALL_ROW_SIZE,
force=False,
delimiter=",",
encoding=None,
):
if os.path.exists(filename) and not force:
pass
else:
dates = pandas.date_range("2000", freq="h", periods=row_size)
df = pandas.DataFrame(
{
"col1": np.arange(row_size),
"col2": [str(x.date()) for x in dates],
"col3": np.arange(row_size),
"col4": [str(x.time()) for x in dates],
}
)
df.to_csv(filename, sep=delimiter, encoding=encoding)
filenames.append(filename)
return df
# Return function that generates csv files
yield _make_csv_file
# Delete csv files that were created
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
def setup_json_file(row_size, force=False):
if os.path.exists(TEST_JSON_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_json(TEST_JSON_FILENAME)
def teardown_json_file():
if os.path.exists(TEST_JSON_FILENAME):
os.remove(TEST_JSON_FILENAME)
def setup_html_file(row_size, force=False):
if os.path.exists(TEST_HTML_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_html(TEST_HTML_FILENAME)
def teardown_html_file():
if os.path.exists(TEST_HTML_FILENAME):
os.remove(TEST_HTML_FILENAME)
def setup_clipboard(row_size, force=False):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
def setup_excel_file(row_size, force=False):
if os.path.exists(TEST_EXCEL_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_excel(TEST_EXCEL_FILENAME)
def teardown_excel_file():
if os.path.exists(TEST_EXCEL_FILENAME):
os.remove(TEST_EXCEL_FILENAME)
def setup_feather_file(row_size, force=False):
if os.path.exists(TEST_FEATHER_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_feather(TEST_FEATHER_FILENAME)
def teardown_feather_file():
if os.path.exists(TEST_FEATHER_FILENAME):
os.remove(TEST_FEATHER_FILENAME)
def setup_hdf_file(row_size, force=False, format=None):
if os.path.exists(TEST_READ_HDF_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_hdf(TEST_READ_HDF_FILENAME, key="df", format=format)
def teardown_hdf_file():
if os.path.exists(TEST_READ_HDF_FILENAME):
os.remove(TEST_READ_HDF_FILENAME)
def setup_msgpack_file(row_size, force=False):
if os.path.exists(TEST_MSGPACK_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_msgpack(TEST_MSGPACK_FILENAME)
def teardown_msgpack_file():
if os.path.exists(TEST_MSGPACK_FILENAME):
os.remove(TEST_MSGPACK_FILENAME)
def setup_stata_file(row_size, force=False):
if os.path.exists(TEST_STATA_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_stata(TEST_STATA_FILENAME)
def teardown_stata_file():
if os.path.exists(TEST_STATA_FILENAME):
os.remove(TEST_STATA_FILENAME)
def setup_pickle_file(row_size, force=False):
if os.path.exists(TEST_PICKLE_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_pickle(TEST_PICKLE_FILENAME)
def teardown_pickle_file():
if os.path.exists(TEST_PICKLE_FILENAME):
os.remove(TEST_PICKLE_FILENAME)
@pytest.fixture
def make_sql_connection():
"""Sets up sql connections and takes them down after the caller is done.
Yields:
Factory that generates sql connection objects
"""
filenames = []
def _sql_connection(filename, table=""):
# Remove file if exists
if os.path.exists(filename):
os.remove(filename)
filenames.append(filename)
# Create connection and, if needed, table
conn = "sqlite:///{}".format(filename)
if table:
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5, 6],
"col2": [7, 8, 9, 10, 11, 12, 13],
"col3": [14, 15, 16, 17, 18, 19, 20],
"col4": [21, 22, 23, 24, 25, 26, 27],
"col5": [0, 0, 0, 0, 0, 0, 0],
}
)
df.to_sql(table, conn)
return conn
yield _sql_connection
# Takedown the fixture
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
def setup_fwf_file():
if os.path.exists(TEST_FWF_FILENAME):
return
fwf_data = """id8141 360.242940 149.910199 11950.7
id1594 444.953632 166.985655 11788.4
id1849 364.136849 183.628767 11806.2
id1230 413.836124 184.375703 11916.8
id1948 502.953953 173.237159 12468.3"""
with open(TEST_FWF_FILENAME, "w") as f:
f.write(fwf_data)
def teardown_fwf_file():
if os.path.exists(TEST_FWF_FILENAME):
os.remove(TEST_FWF_FILENAME)
def test_from_parquet():
setup_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME)
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_parquet_file()
def test_from_parquet_with_columns():
setup_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_parquet_file()
def test_from_json():
setup_json_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_json(TEST_JSON_FILENAME)
modin_df = pd.read_json(TEST_JSON_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_json_file()
def test_from_html():
setup_html_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_html(TEST_HTML_FILENAME)[0]
modin_df = pd.read_html(TEST_HTML_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_html_file()
@pytest.mark.skip(reason="No clipboard on Travis")
def test_from_clipboard():
setup_clipboard(SMALL_ROW_SIZE)
pandas_df = pandas.read_clipboard()
modin_df = pd.read_clipboard()
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_excel():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME)
modin_df = pd.read_excel(TEST_EXCEL_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_excel_file()
# @pytest.mark.skip(reason="Arrow version mismatch between Pandas and Feather")
def test_from_feather():
setup_feather_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_feather(TEST_FEATHER_FILENAME)
modin_df = pd.read_feather(TEST_FEATHER_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_feather_file()
def test_from_hdf():
setup_hdf_file(SMALL_ROW_SIZE, format=None)
pandas_df = pandas.read_hdf(TEST_READ_HDF_FILENAME, key="df")
modin_df = pd.read_hdf(TEST_READ_HDF_FILENAME, key="df")
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_hdf_file()
def test_from_hdf_format():
setup_hdf_file(SMALL_ROW_SIZE, format="table")
pandas_df = pandas.read_hdf(TEST_READ_HDF_FILENAME, key="df")
modin_df = pd.read_hdf(TEST_READ_HDF_FILENAME, key="df")
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_hdf_file()
def test_from_msgpack():
setup_msgpack_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_msgpack(TEST_MSGPACK_FILENAME)
modin_df = pd.read_msgpack(TEST_MSGPACK_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_msgpack_file()
def test_from_stata():
setup_stata_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_stata(TEST_STATA_FILENAME)
modin_df = pd.read_stata(TEST_STATA_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_stata_file()
def test_from_pickle():
setup_pickle_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_pickle(TEST_PICKLE_FILENAME)
modin_df = pd.read_pickle(TEST_PICKLE_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
teardown_pickle_file()
def test_from_sql(make_sql_connection):
filename = "test_from_sql.db"
table = "test_from_sql"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
modin_df = pd.read_sql(query, conn)
assert modin_df_equals_pandas(modin_df, pandas_df)
with pytest.warns(UserWarning):
pd.read_sql_query(query, conn)
with pytest.warns(UserWarning):
pd.read_sql_table(table, conn)
@pytest.mark.skip(reason="No SAS write methods in Pandas")
def test_from_sas():
pandas_df = pandas.read_sas(TEST_SAS_FILENAME)
modin_df = pd.read_sas(TEST_SAS_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME)
modin_df = pd.read_csv(TEST_CSV_FILENAME)
assert modin_df_equals_pandas(modin_df, pandas_df)
if not PY2:
pandas_df = pandas.read_csv(Path(TEST_CSV_FILENAME))
modin_df = pd.read_csv(Path(TEST_CSV_FILENAME))
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_chunksize(make_csv_file):
make_csv_file()
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=500)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=500)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
assert modin_df_equals_pandas(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
assert modin_df_equals_pandas(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
assert modin_df_equals_pandas(modin_df, pd_df)
def test_from_csv_delimiter(make_csv_file):
make_csv_file(delimiter="|")
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, sep="|")
modin_df = pd.read_csv(TEST_CSV_FILENAME, sep="|")
assert modin_df_equals_pandas(modin_df, pandas_df)
modin_df = pd.DataFrame.from_csv(
TEST_CSV_FILENAME, sep="|", parse_dates=False, header="infer", index_col=None
)
pandas_df = pandas.DataFrame.from_csv(
TEST_CSV_FILENAME, sep="|", parse_dates=False, header="infer", index_col=None
)
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_skiprows(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, skiprows=2)
modin_df = pd.read_csv(TEST_CSV_FILENAME, skiprows=2)
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_encoding(make_csv_file):
make_csv_file(encoding="latin8")
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, encoding="latin8")
modin_df = pd.read_csv(TEST_CSV_FILENAME, encoding="latin8")
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_default_to_pandas_behavior(make_csv_file):
make_csv_file()
with pytest.warns(UserWarning):
# Test nrows
pd.read_csv(TEST_CSV_FILENAME, nrows=10)
if not PY2:
with pytest.warns(UserWarning):
# This tests that we default to pandas on a buffer
from io import StringIO
pd.read_csv(StringIO(open(TEST_CSV_FILENAME, "r").read()))
with pytest.warns(UserWarning):
pd.read_csv(TEST_CSV_FILENAME, skiprows=lambda x: x in [0, 2])
def test_from_csv_index_col(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, index_col="col1")
modin_df = pd.read_csv(TEST_CSV_FILENAME, index_col="col1")
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_skipfooter(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, skipfooter=13)
modin_df = pd.read_csv(TEST_CSV_FILENAME, skipfooter=13)
assert modin_df_equals_pandas(modin_df, pandas_df)
def test_from_csv_parse_dates(make_csv_file):
make_csv_file(force=True)
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, parse_dates=[["col2", "col4"]])
modin_df = pd.read_csv(TEST_CSV_FILENAME, parse_dates=[["col2", "col4"]])
assert modin_df_equals_pandas(modin_df, pandas_df)
pandas_df = pandas.read_csv(
TEST_CSV_FILENAME, parse_dates={"time": ["col2", "col4"]}
)
modin_df = pd.read_csv(TEST_CSV_FILENAME, parse_dates={"time": ["col2", "col4"]})
assert modin_df_equals_pandas(modin_df, pandas_df)
@pytest.mark.skip(reason="No clipboard on Travis")
def test_to_clipboard():
modin_df = create_test_ray_dataframe()
pandas_df = create_test_pandas_dataframe()
modin_df.to_clipboard()
modin_as_clip = | pandas.read_clipboard() | pandas.read_clipboard |
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
    def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
            # the default to_csv encoding is utf-8.
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with pytest.raises(TypeError, match='quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with pytest.raises(Error, match='escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected_rows = [',col',
'0,1',
'1,2']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# see gh-781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_rows = [',col1,col2,col3',
'0,1,a,10.1']
expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,1']
expected_european_excel = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_rows = [',col1,col2,col3',
'0,1,a,10.10']
expected_float_format_default = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,10']
expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# see gh-11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0^0,2^2,1',
'1^1,3^3,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0,2.20,1',
'1,3.30,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# see gh-11553
#
# Testing if NaN values are correctly represented in the index.
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0.0,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'_,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0,0,2',
'0,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101', periods=5, freq='s')
})
df_day = DataFrame({'A': pd.date_range('20130101', periods=5, freq='d')
})
expected_rows = [',A',
'0,2013-01-01 00:00:00',
'1,2013-01-01 00:00:01',
'2,2013-01-01 00:00:02',
'3,2013-01-01 00:00:03',
'4,2013-01-01 00:00:04']
expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv() == expected_default_sec
expected_rows = [',A',
'0,2013-01-01 00:00:00',
'1,2013-01-02 00:00:00',
'2,2013-01-03 00:00:00',
'3,2013-01-04 00:00:00',
'4,2013-01-05 00:00:00']
expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert (df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S') ==
expected_ymdhms_day)
expected_rows = [',A',
'0,2013-01-01',
'1,2013-01-01',
'2,2013-01-01',
'3,2013-01-01',
'4,2013-01-01']
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv(date_format='%Y-%m-%d') == expected_ymd_sec
expected_rows = [',A',
'0,2013-01-01',
'1,2013-01-02',
'2,2013-01-03',
'3,2013-01-04',
'4,2013-01-05']
expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_day.to_csv() == expected_default_day
assert df_day.to_csv(date_format='%Y-%m-%d') == expected_default_day
# see gh-7791
#
# Testing if date_format parameter is taken into account
# for multi-indexed DataFrames.
df_sec['B'] = 0
df_sec['C'] = 1
expected_rows = ['A,B,C',
'2013-01-01,0,1']
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
assert (df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d') ==
expected_ymd_sec)
def test_to_csv_multi_index(self):
# see gh-6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
exp_rows = [',1',
',2',
'0,1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
exp_rows = ['1', '2', '1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]),
index=pd.MultiIndex.from_arrays([[1], [2]]))
exp_rows = [',,1', ',,2', '1,2,1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
exp_rows = ['1', '2', '1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
df = DataFrame(
[1], columns=pd.MultiIndex.from_arrays([['foo'], ['bar']]))
exp_rows = [',foo', ',bar', '0,1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
exp_rows = ['foo', 'bar', '1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
@pytest.mark.parametrize("ind,expected", [
(pd.MultiIndex(levels=[[1.0]],
codes=[[0]],
names=["x"]),
"x,data\n1.0,1\n"),
(pd.MultiIndex(levels=[[1.], [2.]],
codes=[[0], [0]],
names=["x", "y"]),
"x,y,data\n1.0,2.0,1\n")
])
@pytest.mark.parametrize("klass", [
pd.DataFrame, pd.Series
])
def test_to_csv_single_level_multi_index(self, ind, expected, klass):
# see gh-19589
result = klass( | pd.Series([1], ind, name="data") | pandas.Series |
import requests
from bs4 import BeautifulSoup
import pandas as pd
def get_tables(urls, link=False):
"""Returns a dataframes list with the tables of the different groups.
Keyword arguments:
urls -- list with urls of the different groups
link -- indicates whether you want to include the url of every team in the dataframe
(default False)."""
# Declare header variable with browsers info
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/47.0.2526.106 Safari/537.36'}
tables_list = [] # Save results in a list
try:
# Create BeautifulSoup objects from our responses
for i, url in enumerate(urls):
response = requests.get(url, headers=headers)
url_bs = BeautifulSoup(response.content, 'html.parser')
# Lists
header = []
datos = []
# The find_all() method returns the tags with the required information.
tags_header = url_bs.find_all("div", {"class": "classificationHeadingItem"})
tags_data = url_bs.find_all("div",
{"class": ["classificationItemOddWrapper", "classificationItemEvenWrapper"]})
tags_link = url_bs.find_all("a", {"class": "classificationTeam"})
# Extract headers, teams, goals and url links.
for tag_header in tags_header:
header.append(tag_header.text)
header.insert(0, 'Equipo'), header.insert(0, 'POS')
header = header[:-1]
for tag_data in tags_data:
datos.append(tag_data.text)
links_lst = []
for tag_link in tags_link:
links_lst.append('https://www.futboleras.es' + tag_link.get('href'))
# Format and put the data into a dataframe.
df = pd.DataFrame([sub.split(' ') for sub in datos])
df.drop(df.columns[[0, 10, 11]], axis=1, inplace=True)
splitcol = df[2].str.split(' ', 2, expand=True)
# Add the points, group and url links of each team.
df.insert(1, 'Equipo', splitcol[1]), df.insert(2, 'PTOS', splitcol[2])
del (df[2])
df.columns = header
df.insert(2, 'Grupo', i + 1)
if link:
df['Link'] = links_lst
tables_list.append(df)
return tables_list
except Exception:
print('Enter a valid url list.')
def general_table(tables_list):
"""Returns the general table with all the teams.
Keyword arguments:
table_list -- list with all tables stored in dataframes"""
# Concatenate all dataframes.
df = | pd.concat(tables_list, axis=0) | pandas.concat |
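# Example workflow (the group URLs are placeholders, not real endpoints):
# urls = ["https://www.futboleras.es/grupo/1", "https://www.futboleras.es/grupo/2"]
# group_tables = get_tables(urls, link=True)
# overall = general_table(group_tables)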