| prompt | completion | api |
|---|---|---|
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |
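Each row of this corpus pairs a prompt (a Python source snippet truncated just before a pandas call), the masked completion (the call itself), and an api column holding the fully qualified pandas function; in the dump below each row ends inline as `... | completion | api |`. The following is a minimal sketch of how such an export could be inspected, assuming the rows have been saved as a JSON-lines file (the rows.jsonl name is hypothetical, not part of the dataset):
import pandas as pd
# Load the exported rows; each record has the three string columns summarized above
# (file name is an assumption for illustration).
rows = pd.read_json("rows.jsonl", lines=True)
print(rows[["prompt", "completion", "api"]].head())
# Which pandas APIs are masked most often in the corpus.
print(rows["api"].value_counts().head(10))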
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global variables: shared database connections
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV.loc[flagMarket, 'SecuCode'] = dataV.loc[flagMarket, 'SecuCode'] + '.SH'
dataV.loc[~flagMarket, 'SecuCode'] = dataV.loc[~flagMarket, 'SecuCode'] + '.SZ'
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
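# Drop tickers whose delisting (ChangeType == 4) happened before the first trading day,
# so the stock universe only contains securities alive within the sample period.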
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
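# Build one trading day's cross-section: OHLC/volume/amount and returns for stocks and the
# three benchmark indexes, plus adjusting factors and listing/suspension flags, each pivoted
# to the full ticker universe.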
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data.loc[flagMarket, 'SecuCode'] = data.loc[flagMarket, 'SecuCode'] + '.SH'
data.loc[~flagMarket, 'SecuCode'] = data.loc[~flagMarket, 'SecuCode'] + '.SZ'
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame(pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
lowM =pd.DataFrame(pd.pivot_table(data,values='LowPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
closeM = pd.DataFrame(pd.pivot_table(data,values='ClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
volumeM = pd.DataFrame(pd.pivot_table(data,values='TurnoverVolume',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
amountM = pd.DataFrame(pd.pivot_table(data,values='TurnoverValue',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
retM = pd.DataFrame(pd.pivot_table(data,values='StockReturns',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)], columns=self.tickerUnivSR)
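# Adjusting factors: pivot by ex-dividend date, forward-fill over the full trading calendar,
# then take the current day's row.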
sql = '''
SELECT A.[ExDiviDate], B.[SecuMarket], B.[SecuCode], A.[AdjustingFactor]
FROM [JYDB].[dbo].[QT_AdjustingFactor] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
'''
dataAF = pd.read_sql_query(sql, conn243)
dataAF = dataAF.rename({'ExDiviDate':'TradingDay'},axis=1)
flagMarket = dataAF.SecuMarket == 83
dataAF.loc[flagMarket, 'SecuCode'] = dataAF.loc[flagMarket, 'SecuCode'] + '.SH'
dataAF.loc[~flagMarket, 'SecuCode'] = dataAF.loc[~flagMarket, 'SecuCode'] + '.SZ'
dataAF.TradingDay = dataAF.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
adjFactorM = pd.pivot_table(dataAF, values='AdjustingFactor', index='TradingDay', columns='SecuCode')
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM ,index=self.tradingDateV, columns=self.tickerUnivSR)
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM =pd.DataFrame(adjFactorM ,index=[str(tradingDay)])
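# Listing flag: combine stock list/delist events (ChangeType 1/4) with index publication dates;
# delistings become 0, then forward-fill and default never-listed tickers to 0.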
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.ChangeType = 1 or A.ChangeType = 4)
'''
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[PubDate],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexBasicInfo] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
'''
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex['ChangeType'] = 1
dataIndex = dataIndex.rename({'PubDate': 'ChangeDate'}, axis='columns')
dataV = pd.concat([dataIndex, dataStock])
flagMarket = dataV.SecuMarket == 83
dataV.loc[flagMarket, 'SecuCode'] = dataV.loc[flagMarket, 'SecuCode'] + '.SH'
dataV.loc[~flagMarket, 'SecuCode'] = dataV.loc[~flagMarket, 'SecuCode'] + '.SZ'
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
listedM = pd.pivot_table(dataV, values='ChangeType', index='ChangeDate', columns='SecuCode')
dateTotal = np.union1d(listedM.index.values, [str(tradingDay)])
listedM = pd.DataFrame(listedM, index=dateTotal, columns=self.tickerUnivSR)
listedM[listedM == 4] = 0
listedM.fillna(method='pad', inplace=True)
listedM = pd.DataFrame(listedM,index= [str(tradingDay)])
listedM = listedM.fillna(0)
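# Suspension flag: 1 when a suspension starts today or no turnover was recorded; on later days
# the previous day's pickled suspM is forward-filled, and unlisted tickers are always marked 1.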
sql = '''
SELECT A.[SuspendDate],A.[ResumptionDate],A.[SuspendTime], A.[ResumptionTime], B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SuspendResumption] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.[SuspendDate] = '%s'
'''%tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SuspendDate] = ','A.[SuspendDate] <= ')
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp.loc[flagMarket, 'SecuCode'] = dataSusp.loc[flagMarket, 'SecuCode'] + '.SH'
dataSusp.loc[~flagMarket, 'SecuCode'] = dataSusp.loc[~flagMarket, 'SecuCode'] + '.SZ'
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except Exception:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[flag] = 1
suspM = endFlag.fillna(0)
suspM[(listedM==0)] = 1
else:
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp.loc[flagMarket, 'SecuCode'] = dataSusp.loc[flagMarket, 'SecuCode'] + '.SH'
dataSusp.loc[~flagMarket, 'SecuCode'] = dataSusp.loc[~flagMarket, 'SecuCode'] + '.SZ'
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
suspPre = pickle.load(file2)['suspM']
file2.close()
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except Exception:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[~flag] = 0
suspM = pd.concat([suspPre,endFlag]).fillna(method='pad')
suspM = pd.DataFrame(suspM,index=[str(tradingDay)])
suspM[(listedM==0)] = 1
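# Special-trade (e.g. ST) status changes effective on this day; on the first trading day the
# query is widened to the full history up to that date.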
sql='''
SELECT A.[SpecialTradeTime],A.[SpecialTradeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SpecialTrade] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.[SpecialTradeType]=1 or A.[SpecialTradeType] = 2 or A.[SpecialTradeType] = 5 or A.[SpecialTradeType] = 6)
and A.[SpecialTradeTime] = '%s'
'''% tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SpecialTradeTime] = ','A.[SpecialTradeTime] <= ')
dataV = | pd.read_sql_query(sql, conn243) | pandas.read_sql_query |
import pandas as pd
import numpy as np
import pickle
from .utils import *
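# Reporting helpers for an epidemic-model run: short-horizon predictions and daily KPI
# printouts compared against official data.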
def predNextDays(optmod_name, opt_mod, var_name, pred_days):
pred = (opt_mod[optmod_name]['mod_data'][var_name])[opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period'] -1 :opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period']+pred_days]
print("Mod: %s \t Next days: %s: \t %s" %(optmod_name, var_name, str([int(x) for x in pred])))
print("Mod: %s \t Variation: %s: \t %s" %(optmod_name, var_name, str([int(x) for x in (pred[1:len(pred)] - pred[0:len(pred)-1])])))
class ModelStats:
def __init__(self, model, act_data, pred_days = 10):
self.model = model
self.act_data = act_data
self.data = pd.DataFrame(self.calcData())
self.data.set_index("date", inplace=True)
def printKpi(self, date, kpi_name, title, num_format = 'd', bperc = False):
var_uff = "uff_" + kpi_name
var_mod = "mod_" + kpi_name
if "uff_" + kpi_name in self.data.columns.tolist():
#print(("%30s: %7" + num_format + " vs %7" + num_format + " (%5" + num_format + " vs %5" + num_format + "), errore: %" + num_format + "") %(
print(("%30s: %7s vs %7s (%5s vs %5s), errore: %s") %(
title,
format_number(self.data[var_uff][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_uff][np.datetime64(date, 'D')] - self.data[var_uff][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc),
format_number(self.data[var_uff][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc)
))
else:
#print(("%30s: %7" + num_format + " (%5" + num_format + ")") %(
print(("%30s: %7s (%5s)") %(
title,
format_number(self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc)
))
def printKpis(self, date):
self.printKpi(date, 'Igc_cum', "Tot Infected")
self.printKpi(date, 'Igc', "Currently Infected")
self.printKpi(date, 'Igci_t', "Currently in Int. Care")
self.printKpi(date, 'Gc_cum', "Tot Recovered")
self.printKpi(date, 'M_cum', "Tot Dead")
print()
self.printKpi(date, 'Igc_cum_pinc', "% Increase, Infected", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Gc_Igc', "% Mortality Rate", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_M_Igc', "% Known Recovery Rate", num_format=".3f", bperc = True)
print()
self.printKpi(date, 'ratio_Gccum_Igccum', "% Recovered / Tot", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Mcum_Igccum', "% Dead / Tot", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Igci_Igc', "% Intensive Care", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Igcn_Igc', "% Non Intensive Care", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_I_Igc', "% Total Infected / Known Infected", num_format=".3f", bperc = True)
self.printKpi(date, 'R0_t', "R0", num_format=".3f")
print()
print()
print("*** 7 days ahead predictions ***")
self.printPredict(date, 'Igc_cum', "Tot Infected", pred_step = 7, bperc = False)
print()
self.printPredict(date, 'Igc', "Currently Infected", pred_step = 7, bperc = False)
print()
self.printPredict(date, 'Igci_t', "Currently in Int. Care", pred_step = 7, bperc = False)
print()
self.printPredict(date, 'Gc_cum', "Tot Recovered", pred_step = 7, bperc = False)
print()
self.printPredict(date, 'M_cum', "Tot Dead", pred_step = 7, bperc = False)
def printPredict(self, curr_date, kpi_name, title, pred_step = 7, bperc = False):
var_mod = "mod_" + kpi_name
data = self.data[var_mod][np.datetime64(curr_date, 'D') : np.datetime64(np.datetime64(curr_date, 'D') + np.timedelta64(pred_step, 'D'))]
data_delta = pd.Series(data).diff(1)
data_str = "["
for val in data:
data_str = " " + data_str + " {:7s}".format(format_number(val)) + " "
data_str = data_str + "]"
data_delta_str = "["
for val in data_delta:
#data_delta_str = " " + data_delta_str + " {:7s}".format(format_number(val)) + " "
#print(val)
#if math.isfinite(val):
data_delta_str = " " + data_delta_str + " {:7s}".format(str(format_number(val))) + " "
#else:
# data_delta_str = " " + data_delta_str + " {:7s}".format("0") + " "
data_delta_str = data_delta_str + "]"
print(("%30s: %60s") %(
title,
data_str
))
print(("%30s: %60s") %(
"Var.",
data_delta_str
))
def calcData(self):
def calcDataVar(data):
istart = self.model['i_start']
#iend = istart + len(data)
mod_len = len(self.model['mod_data']['dat_Igc'])
#return [np.NaN for i in range (0, istart)] + data.tolist() + [np.NaN for i in range(istart + len(data) -1, mod_len-1)]
return [np.NaN for i in range (0, istart)] + data.tolist()[self.act_data.i_start:] + [np.NaN for i in range(istart + len(data[self.act_data.i_start:]) -1, mod_len-1)]
def calcDataVarDate(data):
istart = self.model['i_start']
mod_len = len(self.model['mod_data']['dat_Igc'])
#first_date = data[0] - np.timedelta64(istart, 'D')
first_date = data[self.act_data.i_start] - np.timedelta64(istart, 'D')
return [np.datetime64(first_date + np.timedelta64(i, 'D'), 'D') for i in range (0, mod_len)]
uff_Igci_t = calcDataVar(self.act_data.dat_Igci_t)
uff_Igcn_t = calcDataVar(self.act_data.dat_Igcn_t)
uff_Igc = calcDataVar(self.act_data.dat_Igc)
uff_Igc_cum = calcDataVar(self.act_data.dat_Igc_cum)
uff_Gc_cum = calcDataVar(self.act_data.dat_Gc_cum)
uff_M_cum = calcDataVar(self.act_data.dat_M_cum)
uff_Gc = [np.NaN] + np.diff(uff_Gc_cum).tolist()
uff_M = [np.NaN] + np.diff(uff_M_cum).tolist()
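# KPIs derived from the official series: daily growth of cumulative infections,
# recovery/mortality rates and care-type shares.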
uff_Igc_cum_pinc = (pd.Series(uff_Igc_cum)/pd.Series(uff_Igc_cum).shift(1)) - 1
uff_ratio_Gc_Igc = (pd.Series(uff_Gc)/pd.Series(uff_Igc).shift(1))
uff_ratio_M_Igc = (pd.Series(uff_M)/pd.Series(uff_Igc).shift(1))
uff_ratio_Gccum_Igccum = (np.array(uff_Gc_cum)/np.array(uff_Igc_cum)).tolist()
uff_ratio_Mcum_Igccum = (np.array(uff_M_cum)/np.array(uff_Igc_cum)).tolist()
uff_ratio_Igci_Igc = (np.array(uff_Igci_t)/np.array(uff_Igc)).tolist()
uff_ratio_Igcn_Igc = (np.array(uff_Igcn_t)/np.array(uff_Igc)).tolist()
mod_Igci_t = self.model['mod_data']['dat_Igci_t']
mod_Igcn_t = self.model['mod_data']['dat_Igcn_t']
mod_Ias_t = self.model['mod_data']['dat_Ias_t']
mod_Igs_t = self.model['mod'].Igs_t
mod_Igc = self.model['mod_data']['dat_Igc']
mod_Igc_cum = self.model['mod_data']['dat_Igc_cum']
mod_I = self.model['mod_data']['dat_I']
#mod_NIs_t = self.model['mod_data']['dat_NIs']
mod_G = self.model['mod_data']['dat_G']
mod_Gc = self.model['mod_data']['dat_Gc']
mod_M = self.model['mod_data']['dat_M']
mod_G_cum = self.model['mod_data']['dat_G_cum']
mod_Gc_cum = self.model['mod_data']['dat_Gc_cum']
mod_M_cum = self.model['mod_data']['dat_M_cum']
mod_Popi_t = self.model['mod_data']['dat_Popi_t']
mod_R0_t = self.model['mod_data']['dat_R0_t']
mod_Igc_cum_pinc = ( | pd.Series(mod_Igc_cum) | pandas.Series |
"""
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
ensure_int64, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False, duplicates='raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or pandas.IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : pandas.Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
pandas.Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
pandas.IntervalIndex : Immutable Index implementing an ordered,
sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or pandas.Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
Discovers the same bins, but assign them specific labels. Notice that
the returned Categorical's categories are `labels` and is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
Passing a Series as an input returns a Series with mapping value.
It is used to map numerically to intervals based on bins.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
Use `drop` optional when bins is not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : 1d ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
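# Values outside the bin edges get ids of 0 or len(bins); together with NaNs they are masked as missing.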
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.datetime64
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.timedelta64
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = | to_timedelta(bins) | pandas.to_timedelta |
# Long Author List formatting tool
# <NAME> (<EMAIL> 2020)
# Usage: python3 lal.py
# Input: lal_data2.txt with one author per row and up to 5 affiliations
# <First>;<Last>;<Email>;<Group1>;<Group2>;<Group3>;<Group4>;<Group5>
# Example: Heiko;Goelzer;<EMAIL>;IMAU,UU;ULB;nil;nil;nil
# Use 'nil','nan','0' or '-' to fill unused affiliations
# Output: lal_inout2.txt when saving the modified listing, can be used as
# input the next time
# Parsed: lal_parsed.txt when parsed to insert in a manuscript
# Selected lines and selected blocks can be rearranged by dragging, sorted by last name and deleted.
# 'Save' will write the updated list to a file that can be reused later
# 'Parse' will write formatted output that can be copy-pasted
import pandas as pd
import tkinter as tk
# Listbox for ordering
class ReorderableListbox(tk.Listbox):
""" A Tkinter listbox with drag & drop reordering of lines """
def __init__(self, master, **kw):
kw['selectmode'] = tk.EXTENDED
tk.Listbox.__init__(self, master, kw)
self.bind('<Button-1>', self.setCurrent)
self.bind('<Control-1>', self.toggleSelection)
self.bind('<B1-Motion>', self.shiftSelection)
self.bind('<Leave>', self.onLeave)
self.bind('<Enter>', self.onEnter)
self.selectionClicked = False
self.left = False
self.unlockShifting()
self.ctrlClicked = False
def orderChangedEventHandler(self):
pass
def onLeave(self, event):
# prevents changing selection when dragging
# already selected items beyond the edge of the listbox
if self.selectionClicked:
self.left = True
return 'break'
def onEnter(self, event):
#TODO
self.left = False
def setCurrent(self, event):
self.ctrlClicked = False
i = self.nearest(event.y)
self.selectionClicked = self.selection_includes(i)
if (self.selectionClicked):
return 'break'
def toggleSelection(self, event):
self.ctrlClicked = True
def moveElement(self, source, target):
if not self.ctrlClicked:
element = self.get(source)
self.delete(source)
self.insert(target, element)
def unlockShifting(self):
self.shifting = False
def lockShifting(self):
# prevent moving processes from disturbing each other
# and prevent scrolling too fast
# when dragged to the top/bottom of visible area
self.shifting = True
def shiftSelection(self, event):
if self.ctrlClicked:
return
selection = self.curselection()
if not self.selectionClicked or len(selection) == 0:
return
selectionRange = range(min(selection), max(selection))
currentIndex = self.nearest(event.y)
if self.shifting:
return 'break'
lineHeight = 12
bottomY = self.winfo_height()
if event.y >= bottomY - lineHeight:
self.lockShifting()
self.see(self.nearest(bottomY - lineHeight) + 1)
self.master.after(500, self.unlockShifting)
if event.y <= lineHeight:
self.lockShifting()
self.see(self.nearest(lineHeight) - 1)
self.master.after(500, self.unlockShifting)
if currentIndex < min(selection):
self.lockShifting()
notInSelectionIndex = 0
for i in selectionRange[::-1]:
if not self.selection_includes(i):
self.moveElement(i, max(selection)-notInSelectionIndex)
notInSelectionIndex += 1
currentIndex = min(selection)-1
self.moveElement(currentIndex, currentIndex + len(selection))
self.orderChangedEventHandler()
elif currentIndex > max(selection):
self.lockShifting()
notInSelectionIndex = 0
for i in selectionRange:
if not self.selection_includes(i):
self.moveElement(i, min(selection)+notInSelectionIndex)
notInSelectionIndex += 1
currentIndex = max(selection)+1
self.moveElement(currentIndex, currentIndex - len(selection))
self.orderChangedEventHandler()
self.unlockShifting()
return 'break'
def deleteSelection(self):
# delete selected items
if len(self.curselection()) == 0:
return
self.delete(min(self.curselection()),max(self.curselection()))
def sortAll(self):
# sort all items alphabetically
temp_list = list(self.get(0, tk.END))
temp_list.sort(key=str.lower)
# delete contents of present listbox
self.delete(0, tk.END)
# load listbox with sorted data
for item in temp_list:
self.insert(tk.END, item)
def sortSelection(self):
# sort selected items alphabetically
if len(self.curselection()) == 0:
return
mmax = max(self.curselection())
mmin = min(self.curselection())
temp_list = list(self.get(mmin,mmax))
#print(temp_list)
# Sort reverse because pushed back in reverse order
temp_list.sort(key=str.lower,reverse=True)
# delete contents of present listbox
self.delete(mmin,mmax)
# load listbox with sorted data
for item in temp_list:
self.insert(mmin, item)
def save(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
matche = (df["Email"].isin([items[2]]))
dfout = dfout.append(df[matchf & matchl])
dfout.to_csv('lal_inout2.txt', sep=';', header=None, index=None)
print("File saved!")
def parse_word(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
dfout = dfout.append(df[matchf & matchl])
# parse
first = dfout["FirstName"]
last = dfout["LastName"]
grp = dfout[["Group1","Group2","Group3","Group4","Group5"]]
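# Number the distinct affiliations in order of first appearance (1-based);
# group_ids keeps each author's list of affiliation numbers.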
unique_groups = []
group_ids = []
k = 0
# collect unique groups and indices
for i in range(0,dfout.shape[0]):
groups = []
# loop through max 5 groups
for j in range(0,5):
# Exclude some common dummy place holders
if (grp.iloc[i,j] not in ['nil','nan','0','-']):
if (grp.iloc[i,j] not in unique_groups):
unique_groups.append(grp.iloc[i,j])
k = k + 1
groups.append(k)
else:
ix = unique_groups.index(grp.iloc[i,j])+1
groups.append(ix)
# Add author group ids
group_ids.append(groups)
#print(group_ids)
#print(unique_groups)
# Compose text
with open("lal_parsed_word.txt", "w") as text_file:
# write out names
for i in range(0,dfout.shape[0]):
print(first.iloc[i].strip(), end =" ", file=text_file)
print(last.iloc[i].strip(), end ="", file=text_file)
for j in range(0,len(group_ids[i])):
if j < len(group_ids[i])-1:
print(str(group_ids[i][j]), end =",", file=text_file)
else:
print(str(group_ids[i][j]), end ="", file=text_file)
#print(" ", end ="", file=text_file)
if (i < dfout.shape[0]-1):
# comma and space before next name
print(", ", end ="", file=text_file)
# Add some space between names and affiliations
print("\n\n", file=text_file)
# Write out affiliations
for i in range(0,len(unique_groups)):
print("(", end ="", file=text_file)
print(str(i+1), end ="", file=text_file)
print(")", end =" ", file=text_file)
print(unique_groups[i], end ="\n", file=text_file)
print("File lal_parsed_word.txt written")
# Parse tex \author and \affil
def parse_tex(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = | pd.DataFrame() | pandas.DataFrame |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
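# Round-trip, compression, and file-mode tests for pandas' HDFStore (PyTables) IO backend.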
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# TODO: import only necessary tensorflow functions
import tensorflow as tf
import tensorflow_datasets as tfds
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay,\
roc_curve, roc_auc_score, classification_report, accuracy_score, precision_score, recall_score
# TODO: Add docstrings
# Loads the Patch Camelyon dataset
def load_pcam(data_dir=None):
pcam, pcam_info = tfds.load("patch_camelyon", with_info=True, data_dir=data_dir)
print(pcam_info)
return pcam, pcam_info
# Converts images to prepare them for modelling
def convert_sample(sample):
# Credit: <NAME>
image, label = sample['image'], sample['label']
image = tf.image.convert_image_dtype(image, tf.float32)
label = tf.one_hot(label, 2, dtype=tf.float32)
return image, label
# Alternative to convert_sample which also converts images to grayscale
def convert_sample_grayscale(sample):
image, label = sample['image'], sample['label']
image = tf.image.rgb_to_grayscale(image, name=None)
image = tf.image.convert_image_dtype(image, tf.float32)
label = tf.one_hot(label, 2, dtype=tf.float32)
return image, label
# Substitute for ImageDataGenerator that works directly with the TensorFlow Dataset object
def build_pipelines(pcam, grayscale=False):
# Uses the grayscale version of convert_sample
if grayscale:
train_pipeline = pcam['train'].map(convert_sample_grayscale, num_parallel_calls=8).shuffle(1024).repeat().batch(64).prefetch(2)
valid_pipeline = pcam['validation'].map(convert_sample_grayscale, num_parallel_calls=8).repeat().batch(128).prefetch(2)
test_pipeline = pcam['test'].map(convert_sample_grayscale, num_parallel_calls=8).batch(128).prefetch(2)
# Uses the normal version of convert_sample
else:
# Credit: <NAME>
train_pipeline = pcam['train'].map(convert_sample, num_parallel_calls=8).shuffle(1024).repeat().batch(64).prefetch(2)
valid_pipeline = pcam['validation'].map(convert_sample, num_parallel_calls=8).repeat().batch(128).prefetch(2)
test_pipeline = pcam['test'].map(convert_sample, num_parallel_calls=8).batch(128).prefetch(2)
return train_pipeline, valid_pipeline, test_pipeline
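# --- Illustrative usage sketch (not part of the original module) ---
# Shows how load_pcam and build_pipelines are meant to be combined with an
# already-compiled Keras model. The `model` argument and epoch count are
# assumptions; the step counts follow from the PCam split sizes (262144 train,
# 32768 validation) and the batch sizes hard-coded above (64 train / 128 valid).
def example_training_run(model, data_dir=None, epochs=10):
    pcam, pcam_info = load_pcam(data_dir=data_dir)
    train_pipeline, valid_pipeline, test_pipeline = build_pipelines(pcam)
    history = model.fit(train_pipeline,
                        steps_per_epoch=262144 // 64,
                        validation_data=valid_pipeline,
                        validation_steps=32768 // 128,
                        epochs=epochs)
    return history, test_pipeline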
# Export the training history to a .csv file
def save_history(hist_df, filepath):
# Sample filepath: 'data/models/history/cnn1_history.csv'
hist_csv_file = filepath
with open(hist_csv_file, mode='w') as f:
hist_df.to_csv(f)
# Loads model training history .csv into a pandas dataframe
def load_history(filepath):
# Sample filepath: 'data/models/history/cnn1_history.csv'
hist_df = pd.read_csv(filepath, index_col=0)
return hist_df
# Plot the training accuracy and loss from training history
def plot_history(hist_df, figsize=(10,4), title=None, save=False, filepath=None):
# Create subplots
plt.subplots(1, 2, figsize=figsize)
# Creates a title for the whole plot
plt.suptitle(title, fontsize=24)
# Plot accuracies for train and validation sets
plt.subplot(1, 2, 1)
plt.plot(hist_df['accuracy'], label='Train', marker='o')
plt.plot(hist_df['val_accuracy'], label='Validation', marker='o')
plt.title('Training and Validation Accuracy', size=20)
plt.xlabel('Epoch', size=16)
plt.ylabel('Accuracy', size=16)
plt.legend()
# Plot losses
plt.subplot(1, 2, 2)
plt.plot(hist_df['loss'], label='Train', marker='o')
plt.plot(hist_df['val_loss'], label='Validation', marker='o')
plt.title('Training and Validation Loss', size=20)
plt.xlabel('Epoch', size=16)
plt.ylabel('Loss', size=16)
plt.legend()
# This ensures the subplots do not overlap
plt.tight_layout()
if save:
# Sample filepath: 'data/plots/cnn1_acc_loss_plot.png'
plt.savefig(filepath)
# Show the subplots
plt.show()
# Plot the confusion matrix for a model
def plot_cf_matrix(y_true, y_pred, normalize=True, save=False, filepath=None):
cf_matrix = confusion_matrix(y_true, y_pred)
# Turns each row of the confusion matrix into fractions of its true class
if normalize:
    cf_matrix = cf_matrix / cf_matrix.sum(axis=1)[:, np.newaxis]
ConfusionMatrixDisplay(cf_matrix, display_labels=['Healthy (0)', 'Cancer (1)']).plot()
if save:
# Sample filepath: 'data/plots/cnn1_cf_matrix.png'
plt.savefig(filepath)
plt.show()
# Plot the ROC curve and calculate AUC
def plot_roc_curve(y_true, y_proba, save=False, filepath=None):
if y_proba.shape[1] == 2:
# y_proba is still one-hot encoded, so grab only the class 1 probabilities
y_proba = np.array([i[1] for i in y_proba])
fprs, tprs, thresholds = roc_curve(y_true, y_proba)
roc_auc = roc_auc_score(y_true, y_proba)
plt.figure(figsize=(8, 6))
plt.plot(fprs, tprs, color='darkorange',
lw=2, label='AUC = %0.2f' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlabel('False Positive Rate (FPR)', size=16)
plt.ylabel('True Positive Rate (TPR)', size=16)
plt.title('ROC Curve for Cancer Detection', size=20)
plt.legend(loc="best")
if save:
# Sample filepath: 'data/plots/cnn1_roc.png'
plt.savefig(filepath)
plt.show()
print(f'Area under curve (AUC):{roc_auc}')
# Create a list of ground truth labels from a specified data split
def generate_y_true(pcam, split='test'):
# Initialize iterator so it starts from the beginning
iterator = pcam[split].__iter__()
# Create an empty list to store the labels
y_true = []
if split == 'train':
# There are 262144 images in the training set
for i in range(262144):
y_true.append(int(iterator.get_next()['label']))
else:
# There are 32768 images in the validation and test sets
for i in range(32768):
y_true.append(int(iterator.get_next()['label']))
return np.array(y_true)
# Get predictions as probabilities from a trained model
def generate_y_proba(model, test_pipeline, class_1=False, save=False, filepath=None):
y_proba = model.predict(test_pipeline)
if class_1:
# Return just the class_1 predictions rather than one-hot encoded predictions
y_proba = np.array([i[1] for i in y_proba])
# Save y_proba to a .csv file to load later without training the model
if save:
y_proba_df = pd.DataFrame(y_proba)
# Sample filepath: 'data/models/cnn1_y_proba.csv'
y_proba_csv_file = filepath
with open(y_proba_csv_file, mode='w') as f:
y_proba_df.to_csv(f)
return y_proba
# Load y_proba from a .csv file
def load_y_proba(filepath):
# Sample filepath: 'data/models/cnn1_y_proba.csv'
y_proba = pd.read_csv(filepath, index_col=0)
return y_proba
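# --- Illustrative evaluation sketch (not part of the original module) ---
# Ties the helpers above together for a trained model; `model`, `pcam` and
# `test_pipeline` are assumed to come from load_pcam/build_pipelines and a
# finished training run. Class predictions are the argmax over the one-hot
# probabilities returned by the model.
def example_evaluation(model, pcam, test_pipeline):
    y_true = generate_y_true(pcam, split='test')
    y_proba = generate_y_proba(model, test_pipeline)
    y_pred = np.argmax(y_proba, axis=1)
    print(classification_report(y_true, y_pred))
    plot_cf_matrix(y_true, y_pred)
    plot_roc_curve(y_true, y_proba)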
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from AnalysisTools import *
tb_Parent = './results/scalars'
doScalarsPlots(dataParent=tb_Parent, regenerate=True)
evalPath = "./results/evalSubs.pickle"
lapsPath = "./results/evalSubsLaps.pickle"
perModelSubPath = "./results//perModelSub.pickle"
redoLapsPickle = False
if redoLapsPickle:
with open(evalPath, 'rb') as f:
allSubs = pickle.load(f)
justLaps = []
for test in allSubs:
pdAux = useJustLaps(allSubs[test])
subID = test[:6]
modelType = test[7:]
pdAux['SubID'] = subID
pdAux['Model'] = modelType
justLaps.append(pdAux.copy())
justLaps = pandas.concat(justLaps)
justLaps['Error'] = justLaps['gt'] - justLaps['pred']
justLaps['Abs_Error'] = np.abs(justLaps['gt'] - justLaps['pred'])
justLaps['Square_Error'] = np.power(justLaps['gt'] - justLaps['pred'], 2)
justLaps.reset_index(inplace=True, drop=True)
with open(lapsPath, 'wb') as f:
pickle.dump(justLaps, f)
else:
with open(lapsPath, 'rb') as f:
justLaps = pickle.load(f)
subs = justLaps['SubID'].unique()
modelTypes = justLaps['Model'].unique()
redoPerModelSub = False
if redoPerModelSub:
perModelSubsData = []
perModelSubsDataTraj = []
for subID in subs:
for model in modelTypes:
m = np.logical_and(justLaps['Model'] == model, justLaps['SubID'] == subID)
s1 = justLaps.where(m).dropna()
rets = calculateErrors(s1)
pdAux = pandas.DataFrame(columns=['meanError', 'meanAbsError', 'rms',
'lags_meanError', 'lags_meanAbsError', 'lags_rms',
'false_positive', 'false_negative'], index=[model + subID])
pdAuxTraj = pandas.DataFrame()
if rets[0] is not np.nan:
# save the data
errorDF, falsePos, falseNeg, lagError, predsMean = rets
pdAux[['meanError', 'meanAbsError', 'rms']] = errorDF.mean().values
pdAux[['lags_meanError', 'lags_meanAbsError', 'lags_rms']] = lagError.mean().values
pdAux['false_positive'] = falsePos
pdAux['false_negative'] = falseNeg
pdAuxTraj['Values'] = predsMean
pdAuxTraj['Time'] = np.arange(predsMean.shape[0])
pdAuxTraj['SubID'] = subID
pdAuxTraj['Model'] = model
else:
errorDF, falsePos, falseNeg, lagError, predsMean = rets
pdAux[['meanError', 'meanAbsError', 'rms']] = 1
pdAux[['lags_meanError', 'lags_meanAbsError', 'lags_rms']] = 1
pdAux['false_positive'] = falsePos
pdAux['false_negative'] = falseNeg
pdAux['SubID'] = subID
pdAux['Model'] = model
perModelSubsData.append(pdAux.copy())
perModelSubsDataTraj.append(pdAuxTraj.copy())
perModelSubsData = pandas.concat(perModelSubsData)
perModelSubsDataTraj = pandas.concat(perModelSubsDataTraj)
perModelSubsDataDict = {'scalars': perModelSubsData, 'Traj': perModelSubsDataTraj}
with open(perModelSubPath, 'wb') as f:
pickle.dump(perModelSubsDataDict, f)
else:
with open(perModelSubPath, 'rb') as f:
perModelSubsDataDict = pickle.load(f)
perModelSubsData = perModelSubsDataDict['scalars']
perModelSubsDataTraj = perModelSubsDataDict['Traj']
perModelSubsDataTraj2 = perModelSubsDataTraj.copy()
perModelSubsDataTraj2['Time'] /= 400
sns.lineplot(x='Time', y='Values', hue='Model', data=perModelSubsDataTraj2)
plt.title('Mean cycle per Model')
plt.xlabel('Gait Cycle Percentage')
plt.ylabel('Gait Cycle Prediction')
falsePredPD = []
for fType in ['false_positive', 'false_negative']:
falsePredPDaux = pandas.DataFrame()
falsePredPDaux[['Error', 'SubID', 'Model']] = perModelSubsData[[fType, 'SubID', 'Model']]
falsePredPDaux['Type'] = fType
falsePredPD.append(falsePredPDaux.copy())
falsePredPD = pandas.concat(falsePredPD)
data = falsePredPD.where(falsePredPD['Type'] == 'false_positive')
ax = sns.boxplot(x='Model', y='Error', data=data)
plt.title('False Positive Heel Strike Identification')
plt.ylabel('Frequency [%]')
plt.tight_layout()
significance = [[0, 1],
[0, 2],
[0, 3],
[0, 4]]
maxHeight = 0.1
minHeight = 0.03
for j, ((c1, c2), h) in enumerate(zip(significance, np.linspace(minHeight, maxHeight, len(significance)))):
addSignificanceLevel(ax, c1, c2, data, '*', 'Error', offsetPercentage=h)
plt.tight_layout()
data = falsePredPD.where(falsePredPD['Type']=='false_negative')
ax = sns.boxplot(x='Model', y='Error', data=data)
plt.title('False Negative Heel Strike Identification')
plt.ylabel('Frequency [%]')
plt.tight_layout()
significance = [[3, 4],
[2, 3],
[1, 2],
[1, 3]]
maxHeight = 0.1
minHeight = 0.03
for j, ((c1, c2), h) in enumerate(zip(significance, np.linspace(minHeight, maxHeight, len(significance)))):
addSignificanceLevel(ax, c1, c2, data, '*', 'Error', offsetPercentage=h)
plt.tight_layout()
lagsPD = []
for fType in ['meanError', 'meanAbsError', 'rms']:
falsePredPDaux = pandas.DataFrame()
falsePredPDaux[['Error', 'SubID', 'Model']] = perModelSubsData[['lags_' + fType, 'SubID', 'Model']]
falsePredPDaux['Type'] = fType
lagsPD.append(falsePredPDaux.copy())
lagsPD = pandas.concat(lagsPD)
perModelSubsData['lags_rms'] = perModelSubsData['lags_rms'].astype(float)
ax = sns.boxplot(x='Model', y='lags_rms', data=perModelSubsData)
plt.title('Event ID lag')
plt.ylabel('Delay [ms]')
significance = [[1, 3],
[1, 4]]
maxHeight = 0.1
minHeight = 0.03
for j, ((c1, c2), h) in enumerate(zip(significance, np.linspace(minHeight, maxHeight, len(significance)))):
addSignificanceLevel(ax, c1, c2, perModelSubsData, '*', 'lags_rms', offsetPercentage=h)
plt.tight_layout()
errsPD = []
for fType in ['meanError', 'meanAbsError', 'rms']:
falsePredPDaux = pandas.DataFrame()
import pandas as pd
team_dict = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/times_ids.csv')
dict_2014 = {'Atleta': 'atleta_id','Rodada':'rodada','Clube':'clube_id',
'Posicao':'atleta_posicao_id','Pontos':'pontuacao',
'PontosMedia':'pontuacao_media','Preco':'preco',
'PrecoVariacao':'preco_variacao'}
dict_2015 = {"Rodada":'rodada',"ClubeID":'clube_id',"AtletaID":'atleta_id',
"Pontos":'pontuacao',"PontosMedia":'pontuacao_media',"Preco":'preco',
"PrecoVariacao":'preco_variacao'}
dict_2016 = {"Rodada":'rodada',"ClubeID":'clube_id',"AtletaID":'atleta_id',
"Pontos":'pontuacao',"PontosMedia":'pontuacao_media',"Preco":'preco',
"PrecoVariacao":'preco_variacao'}
dict_2017 = {'atletas.apelido':'atleta_apelido','atletas.atleta_id':'atleta_id',
'atletas.clube.id.full.name':'clube_nome','atletas.clube_id':'clube_id',
'atletas.media_num':'pontuacao_media','atletas.nome':'atleta_nome',
'atletas.pontos_num':'pontuacao','atletas.posicao_id':'posicao',
'atletas.preco_num':'preco','Rodada':'rodada','atletas.variacao_num':'preco_variacao'}
dict_2018 = {"atletas.nome":'atleta_nome',"atletas.slug":'atleta_slug',
"atletas.apelido":'atleta_apelido',"atletas.atleta_id":'atleta_id',
"atletas.rodada_id":'rodada',"atletas.clube_id":'clube_id',
"atletas.posicao_id":'posicao',"atletas.pontos_num":'pontuacao',
"atletas.preco_num":'preco',"atletas.variacao_num":'preco_variacao',
"atletas.media_num":'pontuacao_media',"atletas.clube.id.full.name":'clube_nome'}
def get_round(round, year=2018):
data = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/' + str(year) + '/rodada-' + str(round) + '.csv')
return data
scouts_2018 = pd.DataFrame()
all_rounds = []
for i in range(1, 39):
all_rounds.append(get_round(i))
scouts_2018 = pd.concat(all_rounds, ignore_index=True, sort=False)
scouts_2014 = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/2014/2014_scouts_raw.csv')
scouts_2015 = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/2015/2015_scouts_raw.csv')
scouts_2016 = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/2016/2016_scouts_raw.csv')
scouts_2017 = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/2017/2017_scouts_raw.csv')
players_2016 = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/2016/2016_jogadores.csv')
players_2015 = pd.read_csv('/content/gdrive/My Drive/Data Science/Cartola/2015/2015_jogadores.csv')
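# Illustrative sketch (assumption, not from the original notebook): the
# dict_2014 ... dict_2018 mappings above are column-rename maps meant to bring
# every season's raw columns onto one common schema before further analysis.
def standardize_season(scouts, rename_map):
    """Return a copy of one season's scouts table with standardized column names."""
    return scouts.rename(columns=rename_map)
# e.g. scouts_2017 = standardize_season(scouts_2017, dict_2017)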
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import lrange, lzip, range
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas.util.testing as tm
def test_equals(idx):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(idx):
# GH9947, GH10637
index_a = idx
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_equals_multi(idx):
assert idx.equals(idx)
assert not idx.equals(idx.values)
assert idx.equals(Index(idx.values))
assert idx.equal_levels(idx)
assert not idx.equals(idx[:-1])
assert not idx.equals(idx[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], codes=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], codes=index.codes[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_codes = np.array([0, 0, 1, 2, 2, 3])
minor_codes = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes])
assert not idx.equals(index)
assert not idx.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_codes = np.array([0, 0, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes])
assert not idx.equals(index)
def test_identical(idx):
mi = idx.copy()
mi2 = idx.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_equals_operator(idx):
# GH9785
assert (idx == idx).all()
def test_equals_missing_values():
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_is_():
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
"""dynamic user-input-responsive part of mood, and mood graphs"""
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.signal import lsim, lti
from scipy.signal.ltisys import StateSpaceContinuous
from tqdm.autonotebook import tqdm
from IPython.display import display
from persistence.response_cache import (
ResponseCache,
UserInputIdentifier,
)
from feels.mood import (
random_mood_at_pst_datetime,
logit_diff_to_pos_sent,
pos_sent_to_logit_diff,
)
from util.past import MILESTONE_TIMES
from util.times import now_pst, fromtimestamp_pst
MOOD_IMAGE_DIR = "data/mood_images/"
STEP_SEC = 30 * 1
TAU_SEC = 3600 * 12
TAU_SEC_2ND = 60 * 60
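# Illustrative sketch only (an assumption, not necessarily the module's actual
# filter): the lsim/lti imports together with STEP_SEC and TAU_SEC suggest a
# first-order low-pass response of the mood signal to user inputs; a minimal
# version of that idea looks like this.
def smooth_mood_signal(values, step_sec=STEP_SEC, tau_sec=TAU_SEC):
    """First-order low-pass of a regularly sampled signal (illustrative only)."""
    system = lti([1.0], [tau_sec, 1.0])       # H(s) = 1 / (tau * s + 1)
    t = np.arange(len(values)) * step_sec     # sample times in seconds
    _, smoothed, _ = lsim(system, U=values, T=t)
    return smoothed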
WEIGHTED_AVG_START_TIME = pd.Timestamp("2021-01-04 09:10:00")
WEIGHTED_AVG_P75_WEIGHT = 0.5
RESPONSE_SCALE_BASE = 0.15 # 0.1 # 0.2 #0.5
DETERMINER_CENTER = -3.1 # -2.4 # -1.5 #-2
DETERMINER_CENTER_UPDATES = {
pd.Timestamp("2020-08-20 01:00:00"): -2.4,
pd.Timestamp("2020-08-25 14:00:00"): -2.0,
pd.Timestamp("2020-08-31 09:15:00"): -2.4,
pd.Timestamp("2020-09-16 06:00:00"): -2.1,
pd.Timestamp("2020-10-28 17:00:00"): -2.4,
pd.Timestamp("2020-11-04 11:00:00"): -2.78,
pd.Timestamp("2020-11-13 19:00:00"): -2.7,
pd.Timestamp("2020-11-15 07:30:00"): -2.6,
pd.Timestamp("2020-12-04 07:00:00"): -2.5,
pd.Timestamp("2020-12-10 08:35:00"): -2.35,
pd.Timestamp("2020-12-10 23:45:00"): -2.0,
pd.Timestamp("2020-12-18 15:35:00"): -2.2,
pd.Timestamp("2020-12-21 15:25:00"): -2.3,
WEIGHTED_AVG_START_TIME: 0.0,
pd.Timestamp("2021-02-08 09:25:00"): -0.25,
pd.Timestamp("2021-02-14 17:55:00"): -0.125,
pd.Timestamp("2021-02-15 17:25:00"): 0,
pd.Timestamp("2021-02-16 17:45:00"): 0.5,
pd.Timestamp("2021-02-17 12:45:00"): 0,
pd.Timestamp("2021-02-26 17:30:00"): 0.5,
pd.Timestamp("2021-02-27 16:05:00"): 0.,
pd.Timestamp("2021-03-15 09:55:00"): -0.2,
pd.Timestamp("2021-03-15 19:50:00"): -0.4,
pd.Timestamp("2021-03-20 06:55:00"): 0.,
pd.Timestamp("2021-03-24 22:40:00"): -0.3,
pd.Timestamp("2021-03-31 12:25:00"): -0.5,
pd.Timestamp("2021-04-09 07:10:00"): -0.25,
pd.Timestamp("2021-05-05 17:00:00"): 0.,
pd.Timestamp("2021-05-07 18:15:00"): -0.25,
pd.Timestamp("2021-05-12 07:50:00"): 0.,
pd.Timestamp("2021-05-22 09:50:00"): -0.125,
pd.Timestamp("2021-05-23 07:15:00"): -0.25,
pd.Timestamp("2021-06-05 12:05:00"): -0.5,
pd.Timestamp("2021-06-07 22:35:00"): -0.3,
pd.Timestamp("2021-06-08 13:15:00"): 0.,
pd.Timestamp("2021-06-14 06:55:00"): -0.25,
pd.Timestamp("2021-06-15 18:08:00"): 0.,
pd.Timestamp("2021-06-16 13:00:00"): 0.125,
pd.Timestamp("2021-06-26 07:35:00"): 0.25,
pd.Timestamp("2021-06-30 08:40:00"): 0.,
pd.Timestamp("2021-08-06 00:45:00"): -0.125,
pd.Timestamp("2021-09-21 08:25:00"): 0.,
pd.Timestamp("2021-09-22 17:45:00"): -0.075,
pd.Timestamp("2021-10-24 12:15:00"): -0.,
pd.Timestamp("2021-10-24 08:40:00"): 0.125,
pd.Timestamp("2021-10-25 17:55:00"): 0.25,
pd.Timestamp("2021-10-28 22:40:00"): 0.125,
pd.Timestamp("2021-10-31 18:10:00"): 0.05,
pd.Timestamp("2021-11-02 20:40:00"): 0.,
pd.Timestamp("2021-11-15 19:20:00"): 0.05,
pd.Timestamp("2021-11-17 09:10:00"): 0.1,
pd.Timestamp("2021-11-19 14:50:00"): 0.,
pd.Timestamp("2021-12-24 14:45:00"): 0.1,
pd.Timestamp("2021-12-30 09:55:00"): 0.05,
}
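# Illustrative helper (an assumption about intended semantics, not original
# code): a time-keyed update dict such as DETERMINER_CENTER_UPDATES is normally
# resolved by taking the most recent entry at or before the query time, falling
# back to the base DETERMINER_CENTER before the first update.
def determiner_center_at(ts, base=DETERMINER_CENTER, updates=DETERMINER_CENTER_UPDATES):
    value = base
    for update_time in sorted(updates):
        if update_time <= ts:
            value = updates[update_time]
    return value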
DETERMINER_MULTIPLIER_UPDATES = {
pd.Timestamp("2020-08-25 17:00:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-10-21 21:15:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-16 10:45:00"): 0.0667 / RESPONSE_SCALE_BASE,
| pd.Timestamp("2020-11-25 11:30:00") | pandas.Timestamp |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
NaT,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
class TestDataFrameValues:
@td.skip_array_manager_invalid_test
def test_values(self, float_frame):
float_frame.values[:, 0] = 5.0
assert (float_frame.values[:, 0] == 5).all()
def test_more_values(self, float_string_frame):
values = float_string_frame.values
assert values.shape[1] == len(float_string_frame.columns)
def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
def test_values_duplicates(self):
df = DataFrame(
[[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"]
)
result = df.values
expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_values_with_duplicate_columns(self):
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
@pytest.mark.parametrize("constructor", [date_range, period_range])
def test_values_casts_datetimelike_to_object(self, constructor):
series = Series(constructor("2000-01-01", periods=10, freq="D"))
expected = series.astype("object")
df = DataFrame({"a": series, "b": np.random.randn(len(series))})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
df = DataFrame({"a": series, "b": ["foo"] * len(series)})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
def test_frame_values_with_tz(self):
tz = "US/Central"
df = DataFrame({"A": date_range("2000", periods=4, tz=tz)})
result = df.values
expected = np.array(
[
[Timestamp("2000-01-01", tz=tz)],
[Timestamp("2000-01-02", tz=tz)],
[Timestamp("2000-01-03", tz=tz)],
[Timestamp("2000-01-04", tz=tz)],
]
)
tm.assert_numpy_array_equal(result, expected)
# two columns, homogeneous
df["B"] = df["A"]
result = df.values
expected = np.concatenate([expected, expected], axis=1)
tm.assert_numpy_array_equal(result, expected)
# three columns, heterogeneous
est = "US/Eastern"
df["C"] = df["A"].dt.tz_convert(est)
new = np.array(
[
[Timestamp("2000-01-01T01:00:00", tz=est)],
[Timestamp("2000-01-02T01:00:00", tz=est)],
[Timestamp("2000-01-03T01:00:00", tz=est)],
[Timestamp("2000-01-04T01:00:00", tz=est)],
]
)
expected = np.concatenate([expected, new], axis=1)
result = df.values
tm.assert_numpy_array_equal(result, expected)
def test_interleave_with_tzaware(self, timezone_frame):
# interleave with object
result = timezone_frame.assign(D="foo").values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
["foo", "foo", "foo"],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = timezone_frame.values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
def test_values_interleave_non_unique_cols(self):
df = DataFrame(
[[Timestamp("20130101"), 3.5], [ | Timestamp("20130102") | pandas.Timestamp |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
obs = filter_table(table, taxonomy, include='c_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_all_features_with_frequency_greater_than_zero_get_filtered(self):
table = pd.DataFrame([[2.0, 0.0], [1.0, 0.0], [9.0, 0.0], [1.0, 0.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# empty - feat2, which is matched by the include term, has a frequency
# of zero in all samples, so all samples end up dropped from the table
with self.assertRaisesRegex(ValueError,
expected_regex='greater than zero'):
filter_table(table, taxonomy, include='dd')
def test_extra_taxon_ignored(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee', 'aa; bb; cc'],
index=pd.Index(['feat1', 'feat2', 'feat3'],
name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
def test_missing_taxon_errors(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc'],
index=pd.Index(['feat1'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, expected_regex='All.*feat2'):
filter_table(table, taxonomy, include='bb')
class FilterSeqs(unittest.TestCase):
def test_filter_no_filters(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_seqs(seqs, taxonomy)
def test_alt_delimiter(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_seqs(seqs, taxonomy, include='cc<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# exclude with delimiter
obs = filter_seqs(seqs, taxonomy, exclude='ww<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
def test_filter_seqs_unknown_mode(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_seqs(seqs, taxonomy, include='bb', mode='not-a-mode')
def test_filter_seqs_include(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='bb')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='cc,ee')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='dd')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='peanut!')
def test_filter_seqs_include_exact_match(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
exp = | pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2']) | pandas.Series |
# -*- coding: utf-8 -*-
"""Code snippets without context.
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
# Python libraries
import os
import calendar
# External libraries
import pandas as pd
import geopandas as gpd
try:
from matplotlib import pyplot as plt
except ImportError:
plt = None
from shapely.geometry import Point
# internal modules
import reegis.config as cfg
def lat_lon2point(df):
"""Create shapely point object of latitude and longitude."""
return Point(df['lon'], df['lat'])
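# Illustrative usage sketch (not part of the original snippets; the sample
# coordinates are made up): lat_lon2point is applied row-wise to build a
# GeoDataFrame from a plain lat/lon table.
def example_points_to_geodataframe():
    df = pd.DataFrame({'lat': [52.52, 48.14], 'lon': [13.40, 11.58]})
    df['geometry'] = df.apply(lat_lon2point, axis=1)
    return gpd.GeoDataFrame(df, crs='epsg:4326', geometry='geometry')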
def geo_csv_from_shp(shapefile, outfile, id_col, tmp_file='tmp.csv'):
tmp = gpd.read_file(shapefile)
tmp.to_csv(tmp_file)
tmp = pd.read_csv(tmp_file)
new = pd.DataFrame()
new['gid'] = tmp[id_col]
# # Special column manipulations
# new['gid'] = new['gid'].apply(lambda x: x.replace('ü', 'ü'))
# new['region'] = new['gid'].apply(lambda x: x.split('_')[1])
# new['state'] = new['gid'].apply(lambda x: x.split('_')[0])
new['geom'] = tmp['geometry']
new.set_index('gid', inplace=True)
new.to_csv(outfile)
os.remove(tmp_file)
def energy_balance2repo():
chiba = '/home/uwe/chiba/'
source_path = 'Promotion/Statstik/Energiebilanzen/Endenergiebilanz'
csv_path = cfg.get('paths', 'static_sources')
filenames = ['energybalance_DE_2012_to_2014.xlsx',
'energybalance_states_2012_to_2014.xlsx',
'sum_table_fuel_groups.xlsx',
'sum_table_sectors.xlsx']
for filename in filenames:
if 'sum' in filename:
idx = [0, 1]
else:
idx = [0, 1, 2]
excelfile = os.path.join(chiba, source_path, filename)
csvfile = os.path.join(csv_path, filename.replace('.xlsx', '.csv'))
excel2csv(excelfile, csvfile, index_col=idx)
def excel2csv(excel_file, csv_file, **kwargs):
df = pd.read_excel(excel_file, **kwargs)
df.to_csv(csv_file)
def sorter():
b_path = '/home/uwe/express/reegis/data/feedin/solar/'
lg_path = b_path + 'M_LG290G3__I_ABB_MICRO_025_US208/'
sf_path = b_path + 'M_SF160S___I_ABB_MICRO_025_US208/'
pattern = "{0}_feedin_coastdat_de_normalised_solar.h5"
full = os.path.join(b_path, pattern)
full_new_lg = os.path.join(lg_path, pattern)
full_new_sf = os.path.join(sf_path, pattern)
for year in range(1999, 2015):
if os.path.isfile(full.format(year)):
print(full.format(year))
print(year, calendar.isleap(year))
if calendar.isleap(year):
n = 8784
else:
n = 8760
f = pd.HDFStore(full.format(year), mode='r')
new_lg = pd.HDFStore(full_new_lg.format(year), mode='w')
new_sf = pd.HDFStore(full_new_sf.format(year), mode='w')
for key in f.keys():
ls_lg = list()
ls_sf = list()
for col in f[key].columns:
if 'LG' in col:
ls_lg.append(col)
elif 'SF' in col:
ls_sf.append(col)
else:
print(col)
print('Oh noo!')
exit(0)
new_lg[key] = f[key][ls_lg][:n]
new_sf[key] = f[key][ls_sf][:n]
f.close()
new_lg.close()
new_sf.close()
def plz2ireg():
geopath = '/home/uwe/git_local/reegis-hp/reegis_hp/de21/data/geometries/'
geofile = 'postcode_polygons.csv'
plzgeo = pd.read_csv(os.path.join(geopath, geofile), index_col='zip_code',
squeeze=True)
iregpath = '/home/uwe/'
iregfile = 'plzIreg.csv'
plzireg = pd.read_csv(os.path.join(iregpath, iregfile), index_col='plz',
squeeze=True)
plzireg = plzireg.groupby(plzireg.index).first()
ireggeo = pd.DataFrame(pd.concat([plzgeo, plzireg], axis=1))
ireggeo.to_csv(os.path.join(iregpath, 'ireg_geo.csv'))
ireggeo = ireggeo[ireggeo['geom'].notnull()]
# ireggeo['geom'] = geoplot.postgis2shapely(ireggeo.geom)
geoireg = gpd.GeoDataFrame(ireggeo, crs='epsg:4326', geometry='geom')
geoireg.to_file(os.path.join(iregpath, 'ireg_geo.shp'))
# import plots
# plots.plot_geocsv('/home/uwe/ireg_geo.csv', [0], labels=False)
exit(0)
def testerich():
spath = '/home/uwe/chiba/Promotion/Kraftwerke und Speicher/'
sfile = 'Pumpspeicher_in_Deutschland.csv'
storage = pd.read_csv(os.path.join(spath, sfile), header=[0, 1])
storage.sort_index(1, inplace=True)
print(storage)
print(storage['ZFES', 'energy'].sum())
print(storage['Wikipedia', 'energy'].sum())
def decode_wiki_geo_string(gstr):
replist = [('°', ';'), ('′', ';'), ('″', ';'), ('N.', ''), ('O', ''),
('\xa0', ''), (' ', '')]
if isinstance(gstr, str):
for rep in replist:
gstr = gstr.replace(rep[0], rep[1])
gstr = gstr.split(';')
lat = float(gstr[0]) + float(gstr[1]) / 60 + float(gstr[2]) / 3600
lon = float(gstr[3]) + float(gstr[4]) / 60 + float(gstr[5]) / 3600
else:
lat = None
lon = None
return lat, lon
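# Minimal sketch of the input format decode_wiki_geo_string expects: a
# Wikipedia-style DMS string such as '54°21′0″N. 6°19′0″O'. The example value is
# illustrative, not taken from the offshore wind-park table.
def _example_decode_wiki_geo_string():
    lat, lon = decode_wiki_geo_string('54°21′0″N. 6°19′0″O')
    # 54 + 21/60 = 54.35 and 6 + 19/60 ≈ 6.3167
    return lat, lon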
def offshore():
spath = '/home/uwe/chiba/Promotion/Kraftwerke und Speicher/'
sfile = 'offshore_windparks_prepared.csv'
offsh = pd.read_csv(os.path.join(spath, sfile), header=[0, 1],
index_col=[0])
print(offsh)
# offsh['Wikipedia', 'geom'] = offsh['Wikipedia', 'geom_str'].apply(
# decode_wiki_geo_string)
# offsh[[('Wikipedia', 'latitude'), ('Wikipedia', 'longitude')]] = offsh[
# 'Wikipedia', 'geom'].apply(pd.Series)
# offsh.to_csv(os.path.join(spath, 'offshore_windparks_prepared.csv'))
def bmwe():
spath = '/home/uwe/chiba/Promotion/Kraftwerke und Speicher/'
sfile1 = 'installation_bmwe.csv'
sfile2 = 'strom_bmwe.csv'
# sfile3 = 'hydro.csv'
inst = pd.read_csv(os.path.join(spath, sfile1), index_col=[0]).astype(
float)
strom = pd.read_csv(os.path.join(spath, sfile2), index_col=[0]).astype(
float)
# hydro = pd.read_csv(os.path.join(spath, sfile3), index_col=[0],
# squeeze=True).astype(float)
cols = pd.MultiIndex(levels=[[], []], labels=[[], []],
names=['type', 'value'])
df = pd.DataFrame(index=inst.index, columns=cols)
for col in inst.columns:
df[col, 'capacity'] = inst[col]
df[col, 'energy'] = strom[col]
df.to_csv('/home/uwe/git_local/reegis-hp/reegis_hp/de21/data/static/'
'energy_capacity_bmwi_readme.csv')
def prices():
# from matplotlib import pyplot as plt
spath = '/home/uwe/git_local/reegis-hp/reegis_hp/de21/data/static/'
sfile = 'commodity_sources_prices.csv'
price = pd.read_csv(os.path.join(spath, sfile),
index_col=[0], header=[0, 1])
print(price)
price['Erdgas'].plot()
plt.show()
def load_energiebilanzen():
spath = '/home/uwe/chiba/Promotion/Energiebilanzen/2014/'
# sfile = 'Energiebilanz RheinlandPfalz 2014.xlsx'
sfile = 'Energiebilanz BadenWuerttemberg2014.xls'
filename = os.path.join(spath, sfile)
    # 'index' is not a read_excel keyword; header=None is enough here because the
    # frame is immediately sliced with iloc
    header = pd.read_excel(filename, 0, header=None).iloc[:3, 5:].ffill(axis=1)
eb = pd.read_excel(filename, 0, skiprows=3, index_col=[0, 1, 2, 3, 4],
skip_footer=2)
eb.columns = pd.MultiIndex.from_arrays(header.values)
# print(eb)
# print(eb.loc[pd.IndexSlice[
# 'ENDENERGIEVERBRAUCH',
# :,
# :,
# 84]].transpose())
eb.sort_index(0, inplace=True)
eb.sort_index(1, inplace=True)
#
print(eb.loc[(slice(None), slice(None), slice(None), 84), 'Braunkohlen'])
# print(eb.columns)
def create_small_data_sets():
fn = '/home/uwe/express/reegis/data/powerplants/opsd/renewable_power_plants_DE.csv'
    pp = pd.read_csv(fn)
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
u(r'\u2233\s*'), re.compile(r''))
fails = 1, [], object()
for p in passes:
assert com.is_re_compilable(p)
for f in fails:
assert not com.is_re_compilable(f)
class TestTake(unittest.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert(result[3] == fill_value)
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2, 3]] == data[indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_2d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_2d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
assert((result[3, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
assert((result[:, 3] == fill_value).all())
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_3d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
com.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
com.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
                        com.take_nd(data, indexer, out=out, axis=i)
import streamlit as st
import pandas as pd
import numpy as np
import sklearn.neighbors
import pydeck as pdk
import seaborn as sns
from util import config
from util import mapping
from util import trip_data
@st.cache(suppress_st_warning=True)
def load_data():
st.write('Loading data...')
trips = pd.read_feather(config.PROCESSED_DATA_PATH + 'trips_scaled.feather')
trips.set_index('rte_id', inplace=True)
    gridpts_at_rte_500 = pd.read_feather(config.PROCESSED_DATA_PATH + 'gridpts_at_rte_500.feather')
import requests
import datetime
import numpy as np
import pandas as pd
today_dt = datetime.date.today()
today = today_dt.strftime('%Y-%m-%d')
OURWORLDINDATA = "https://covid.ourworldindata.org/data/owid-covid-data.csv"
REPO_DATA = (
'https://raw.githubusercontent.com/'
'mauforonda/covid19-bolivia/master/data.json'
)
def fetch_from_ourworldindata(iso_code=None):
response = requests.get(OURWORLDINDATA)
with open('ourworldindata.csv', 'w+') as f:
f.write(response.text)
with open(f'data/ourworldindata_{today}.csv', 'w+') as f:
f.write(response.text)
df = pd.read_csv('ourworldindata.csv')
return df
def fetch_from_covid19_bolivia_repo():
response = requests.get(REPO_DATA)
data = response.json()
rows = []
for item in data['confirmados']:
row = {'fecha': item['fecha']}
row.update(item['dep'])
rows.append(row)
cities = [
'la_paz', 'cochabamba', 'santa_cruz', 'oruro', 'potosí', 'tarija',
'chuquisaca', 'beni', 'pando'
]
df = pd.DataFrame(rows)
df['total'] = df[cities].sum(axis=1)
filtered = df[(['fecha', 'total'] + cities)]
filtered.columns = ['ds', 'y'] + cities
return filtered
def get_data(source='ourworldindata'):
if source == 'ourworldindata':
filtered = fetch_from_ourworldindata()
elif source == 'github':
return fetch_from_covid19_bolivia_repo()
    elif source == 'boliviasegura':
        url = 'https://boliviasegura.agetic.gob.bo/wp-content/json/api.php'
        # assumes the endpoint returns a tabular JSON payload; wrap it in a
        # DataFrame so the to_csv calls below work
        filtered = pd.DataFrame(requests.get(url).json())
filtered.to_csv(f'data/{source}.csv', index=False)
filtered.to_csv(f'data/{source}_{today}.csv', index=False)
filtered.sort_values(by='ds', inplace=True)
return filtered
def get_population():
population = pd.read_csv('data/population.csv')
population = population[population['Year'] == '2019'].sort_values(['Population'], ascending=False)
population = population[pd.notna(population['Code'])]
return population
def get_full_data(source='ourworldindata', force=False):
if force:
df = fetch_from_ourworldindata()
else:
df = pd.read_csv(f'{source}.csv')
df = df[df['iso_code'] != 'OWID_WRL']
    df = df[pd.notnull(df['iso_code'])]
# Imports
from flask import render_template, redirect, request, session, url_for, Flask, g
from dotenv import load_dotenv
from pathlib import Path
import yfinance as yf
import datetime as d
import pynance as pn
import pandas as pd
import requests
import glob
import json
import os
import io
from sendmail import send_mail, send_buy, send_sell
from models import users, contactus, stock
# Import environment variables
load_dotenv()
key_id = os.getenv("KEY_ID")
key_secret = os.getenv("KEY_SECRET")
# Initialize Payment Session
request_payment = requests.Session()
request_payment.auth = (key_id, key_secret)
payment_data = json.load(open("payment_data.json"))
# Path to database
path = "app.db"
# To pass data from one page to another
class state:
...
s = state()
# App configuration
templates_path = os.path.abspath("./templates")
app = Flask(__name__, template_folder=templates_path)
app.secret_key = "somekey"
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0
# Create tables
users.create_user(path)
contactus.create_tbl(path)
stock.make_tbl(path)
def get_current_price(symbol: str) -> float:
"""Gets current closing price of stock using Ticker method
Args:
symbol: Stock Symbol
Returns:
float: Closing Stock price
"""
ticker = yf.Ticker(symbol)
todays_data = ticker.history(period="1d")
return float(todays_data["Close"][0])
def get_current_stock_price(symbol: str) -> float:
"""Gets current closing price of stock
(Substitute for init function error)
Args:
symbol: Stock Symbol
Returns:
float: Closing Stock price
"""
data = pn.data.get(symbol, start=None, end=None)
return float(data["Close"][0])
class Currency_Conversion:
"""
API Class for currency conversion
"""
rates = {}
def __init__(self, url):
data = requests.get(url).json()
self.rates = data["rates"]
def convert(self, from_currency, to_currency, amount) -> float:
"""Converts one currency to another
Args:
from_currency: Currency to be converted from
            to_currency: Currency to be converted to
amount: amount to be converted
Returns:
float: Converted amount
"""
initial_amount = amount
if from_currency != "EUR":
amount = amount / self.rates[from_currency]
amount = round(amount * self.rates[to_currency], 2)
return amount
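# Hedged usage sketch for Currency_Conversion: any endpoint returning JSON with a
# top-level "rates" mapping keyed by currency code will do (that shape is assumed
# from how self.rates is filled). The URL below is a placeholder, not necessarily
# the endpoint this app uses.
def _example_currency_conversion():
    url = "https://api.exchangerate-api.com/v4/latest/EUR"  # hypothetical endpoint
    cc = Currency_Conversion(url)
    # convert 100 US dollars to euros using the fetched rates
    return cc.convert("USD", "EUR", 100)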
# List of stock symbols from URL containing NASDAQ listings
url = "https://pkgstore.datahub.io/core/nasdaq-listings/nasdaq-listed_csv/data/7665719fb51081ba0bd834fde71ce822/nasdaq-listed_csv.csv"
data = requests.get(url).content
df_data = pd.read_csv(io.StringIO(data.decode("utf-8")))
symbols = df_data["Symbol"].to_list()
@app.before_request
def security():
"""
Sets current user (g.user) to none and checks if the user is in session
If in session then email is fetched and g.user is updated to that email
"""
g.user = None
if "user_email" in session:
emails = users.getemail(path)
try:
useremail = [
email for email in emails if email[0] == session["user_email"]
][0]
g.user = useremail
except Exception as e:
print("Failed")
@app.route("/", methods=["GET", "POST"])
def home():
"""
Login Page
"""
session.pop("user_email", None)
flag = True
# Store input if a post request is made
if request.method == "POST":
name = request.form["name"]
email = request.form["email"]
password = request.form["password"]
repeat_password = request.form["rpassword"]
if password and not repeat_password:
if users.check_user_exist(path, email):
if users.check_hash(path, password, email):
session["user_email"] = email
return redirect("/index")
else:
flag = False
return render_template(
"login.html", error="Incorrect Email or Password"
)
else:
return render_template("login.html", error="User Doesnt Exist")
if password and repeat_password:
if not users.check_user_exist(path, email):
if password == repeat_password:
password = users.hash_pwd(password)
users.insert(path, "user", (email, name, password, 0))
session["user_email"] = email
return render_template(
"login.html", error="Sign Up Complete - Login"
)
else:
return render_template(
"login.html", error="Password & Retyped Password Not Same"
)
else:
return render_template(
"login.html", error="This User Already Exists! Try Again"
)
if not name and not password and email:
if users.check_user_exist(path, email):
reset_password(path, email)
return render_template(
"login.html",
error="We have sent you a link to reset your password. Check your mailbox",
)
else:
return render_template(
"login.html", error="This Email Doesnt Exist - Please Sign Up"
)
if flag:
return render_template("login.html")
@app.route("/index", methods=["GET", "POST"])
def index():
"""
Home Page
"""
if g.user:
return render_template("index.html")
return redirect("/")
def reset_password(path: str, email: str):
"""
Sends mail for resetting password to user
"""
send_mail(path, email)
@app.route("/reset", methods=["GET", "POST"])
def reset():
"""
Reset Password Page
"""
if request.method == "POST":
pwd = request.form["<PASSWORD>"]
repeat_pwd = request.form["rnpassword"]
ver_code = request.form["vcode"]
try:
ver_code = int(ver_code)
        except ValueError:
            raise TypeError("Verification code must be an integer")
if pwd and repeat_pwd and ver_code:
if pwd == repeat_pwd:
if users.check_code(path, ver_code):
pwd = users.hash_pwd(pwd)
users.reset_pwd(path, pwd, ver_code)
users.reset_code(path, ver_code)
return redirect("/")
else:
return render_template(
"reset.html", error="Incorrect Verification Code"
)
else:
return render_template(
"reset.html", error="Password & Retyped Password Not Same"
)
return render_template("reset.html")
@app.route("/inv", methods=["GET", "POST"])
def inv():
"""
Analysis Page - displays historical stock data
"""
if g.user:
if request.method == "POST":
stock_id = request.form["stocksym"]
stock_id = stock_id.upper()
if stock_id in symbols:
df_stock = yf.download(stock_id, start="1950-01-01", period="1d")
else:
return render_template(
"inv.html",
error="Incorrect Stock Symbol. Please Enter Valid Symbol",
)
df_stock.drop("Adj Close", axis="columns", inplace=True)
df_stock.reset_index(inplace=True)
df_stock["Date"] = | pd.to_datetime(df_stock["Date"]) | pandas.to_datetime |
"""This module contains code for binding together the clustering and fnotching tools to create the
final catalog files.
When run as a script from command-line, it requires to launch a local ipcontroller for the parallel
processing.
If you execute this locally, you can create one with `ipcluster start -n <no>`, with <no> the number
of cores you want to provide to the parallel processing routines.
"""
import argparse
import itertools
import logging
import string
import pandas as pd
from ipyparallel import Client
from ipyparallel.util import interactive
from tqdm import tqdm
from nbtools import execute_in_parallel
from . import fnotching, io, metadata as p4meta
from .projection import TileCalculator, create_RED45_mosaic, xy_to_hirise, XY2LATLON
LOGGER = logging.getLogger(__name__)
# logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
def fan_id_generator():
for newid in itertools.product(string.digits + 'abcdef', repeat=6):
yield 'F' + ''.join(newid)
def blotch_id_generator():
for newid in itertools.product(string.digits + 'abcdef', repeat=6):
yield 'B' + ''.join(newid)
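# Short sketch of how the id generators are consumed (see add_marking_ids below):
# each next() call yields a fresh hex-style id with an 'F' or 'B' prefix.
def _example_marking_ids():
    fan_ids = fan_id_generator()
    # first ids are 'F000000', 'F000001', 'F000002', ...
    return list(itertools.islice(fan_ids, 3))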
def add_marking_ids(path, fan_id, blotch_id):
"""Add marking_ids for catalog to cluster results.
Parameters
----------
path : str, pathlib.Path
Path to L1A image_id clustering result directory
fan_id, blotch_id : generator
Generator for marking_id
"""
image_id = path.parent.name
for kind, id_ in zip(['fans', 'blotches'], [fan_id, blotch_id]):
fname = str(path / f"{image_id}_L1A_{kind}.csv")
try:
df = pd.read_csv(fname)
except FileNotFoundError:
continue
else:
marking_ids = []
for _ in range(df.shape[0]):
marking_ids.append(next(id_))
df['marking_id'] = marking_ids
df.to_csv(fname, index=False)
def get_L1A_paths(obsid, savefolder):
pm = io.PathManager(obsid=obsid, datapath=savefolder)
paths = pm.get_obsid_paths('L1A')
return paths
def cluster_obsid(obsid=None, savedir=None, imgid=None):
"""Cluster all image_ids for given obsid (=image_name).
Parameters
----------
obsid : str
HiRISE obsid (= Planet four image_name)
savedir : str or pathlib.Path
Top directory path where the catalog will be stored. Will create folder if it
does not exist yet.
imgid : str, optional
Convenience parameter: If `obsid` is not given and therefore is None, this `image_id` can
be used to receive the respective `obsid` from the TileID class.
"""
# import here to support parallel execution
from planet4 import dbscan, markings
# parameter checks
if obsid is None and imgid is not None:
obsid = markings.TileID(imgid).image_name
elif obsid is None and imgid is None:
raise ValueError("Provide either obsid or imgid.")
# cluster
dbscanner = dbscan.DBScanner(savedir=savedir)
dbscanner.cluster_image_name(obsid)
def fnotch_obsid(obsid=None, savedir=None, fnotch_via_obsid=False,
imgid=None):
"""
fnotch_via_obsid: bool, optional
Switch to control if fnotching happens per image_id or per obsid
"""
from planet4 import fnotching
# fnotching / combining ambiguous cluster results
# fnotch across all the HiRISE image
# does not work yet correctly! Needs to scale for n_classifications
if fnotch_via_obsid is True:
fnotching.fnotch_obsid(obsid, savedir=savedir)
fnotching.apply_cut_obsid(obsid, savedir=savedir)
else:
# default case: Fnotch for each image_id separately.
fnotching.fnotch_image_ids(obsid, savedir=savedir)
fnotching.apply_cut(obsid, savedir=savedir)
return obsid
def cluster_obsid_parallel(args):
"Create argument tuples for cluster_obsid, for parallel usage."
obsid, savedir = args
return cluster_obsid(obsid, savedir)
def fnotch_obsid_parallel(args):
"Create argument tuples for cluster_obsid, for parallel usage."
obsid, savedir = args
return fnotch_obsid(obsid, savedir)
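# Hedged sketch of how the *_parallel helpers are meant to be driven: build
# (obsid, savedir) tuples (cf. ReleaseManager.get_parallel_args below) and map
# them over a pool. The obsid strings and catalog name are placeholders, and the
# call assumes execute_in_parallel(func, iterable_of_args) as imported above.
def _example_parallel_clustering():
    args = [("ESP_011296_0975", "P4_catalog_v1.0"),
            ("ESP_011341_0980", "P4_catalog_v1.0")]
    return execute_in_parallel(cluster_obsid_parallel, args)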
class ReleaseManager:
"""Class to manage releases and find relevant files.
Parameters
----------
version : str
Version string for this catalog. Same as datapath in other P4 code.
obsids : iterable, optional
Iterable of obsids that should be used for catalog file. Default is to use the full list of the default database, which is Seasons 2 and 3 at this point.
overwrite : bool, optional
Switch to control if already existing result folders for an obsid should be overwritten.
Default: False
"""
DROP_FOR_TILE_COORDS = ['xy_hirise', 'SampleResolution',
'LineResolution', 'PositiveWest360Longitude',
'Line', 'Sample']
FAN_COLUMNS_AS_PUBLISHED = [
'marking_id', 'angle', 'distance', 'tile_id', 'image_x', 'image_y', 'n_votes',
'obsid', 'spread', 'version', 'vote_ratio', 'x', 'y', 'x_angle', 'y_angle', 'l_s',
'map_scale', 'north_azimuth', 'BodyFixedCoordinateX', 'BodyFixedCoordinateY',
'BodyFixedCoordinateZ', 'PlanetocentricLatitude', 'PlanetographicLatitude',
'Longitude'
]
BLOTCH_COLUMNS_AS_PUBLISHED = [
'marking_id', 'angle', 'tile_id', 'image_x', 'image_y', 'n_votes', 'obsid',
'radius_1', 'radius_2', 'vote_ratio', 'x', 'y', 'x_angle', 'y_angle', 'l_s',
'map_scale', 'north_azimuth', 'BodyFixedCoordinateX', 'BodyFixedCoordinateY',
'BodyFixedCoordinateZ', 'PlanetocentricLatitude', 'PlanetographicLatitude',
'Longitude'
]
def __init__(self, version, obsids=None, overwrite=False):
self.catalog = f'P4_catalog_{version}'
self.overwrite = overwrite
self._obsids = obsids
@property
def savefolder(self):
"Path to catalog folder"
return io.data_root / self.catalog
@property
def metadata_path(self):
"Path to catalog metadata file."
return self.savefolder / f"{self.catalog}_metadata.csv"
@property
def tile_coords_path(self):
"Path to catalog tile coordinates file."
return self.savefolder / f"{self.catalog}_tile_coords.csv"
@property
def tile_coords_path_final(self):
"Path to final catalog tile coordinates file."
return self.savefolder / f"{self.catalog}_tile_coords_final.csv"
@property
def obsids(self):
"""Return list of obsids for catalog production.
If ._obsids is None, get default full obsids list for current default P4 database.
"""
if self._obsids is None:
db = io.DBManager()
self._obsids = db.obsids
return self._obsids
@obsids.setter
def obsids(self, values):
self._obsids = values
@property
def fan_file(self):
"Return path to fan catalog file."
try:
return next(self.savefolder.glob("*_fan.csv"))
except StopIteration:
print(f"No file found. Looking at {self.savefolder}.")
@property
def blotch_file(self):
"Return path to blotch catalog file."
try:
return next(self.savefolder.glob("*_blotch.csv"))
except StopIteration:
print(f"No file found. Looking at {self.savefolder}.")
@property
def fan_merged(self):
return self.fan_file.parent / f"{self.fan_file.stem}_meta_merged.csv"
@property
def blotch_merged(self):
return self.blotch_file.parent / f"{self.blotch_file.stem}_meta_merged.csv"
def read_fan_file(self):
return pd.read_csv(self.fan_merged)
def read_blotch_file(self):
return pd.read_csv(self.blotch_merged)
def check_for_todo(self):
bucket = []
for obsid in self.obsids:
pm = io.PathManager(obsid=obsid, datapath=self.savefolder)
path = pm.obsid_results_savefolder / obsid
if path.exists() and self.overwrite is False:
continue
else:
bucket.append(obsid)
self.todo = bucket
def get_parallel_args(self):
return [(i, self.catalog) for i in self.todo]
def get_no_of_tiles_per_obsid(self):
db = io.DBManager()
all_data = db.get_all()
return all_data.groupby('image_name').image_id.nunique()
@property
def EDRINDEX_meta_path(self):
return self.savefolder / f"{self.catalog}_EDRINDEX_metadata.csv"
def calc_metadata(self):
if not self.EDRINDEX_meta_path.exists():
NAs = p4meta.get_north_azimuths_from_SPICE(self.obsids)
edrindex = pd.read_hdf("/Volumes/Data/hirise/EDRCUMINDEX.hdf")
p4_edr = edrindex[edrindex.OBSERVATION_ID.isin(self.obsids)].query(
'CCD_NAME=="RED4"').drop_duplicates(subset='OBSERVATION_ID')
p4_edr = p4_edr.set_index('OBSERVATION_ID').join(
NAs.set_index('OBSERVATION_ID'))
p4_edr = p4_edr.join(self.get_no_of_tiles_per_obsid())
p4_edr.rename(dict(image_id="# of tiles"), axis=1, inplace=True)
p4_edr['map_scale'] = 0.25 * p4_edr.BINNING
p4_edr.reset_index(inplace=True)
p4_edr.to_csv(self.EDRINDEX_meta_path)
else:
p4_edr = pd.read_csv(self.EDRINDEX_meta_path)
cols = ['OBSERVATION_ID', 'IMAGE_CENTER_LATITUDE', 'IMAGE_CENTER_LONGITUDE', 'SOLAR_LONGITUDE', 'START_TIME',
'map_scale', 'north_azimuth', '# of tiles']
metadata = p4_edr[cols]
metadata.to_csv(self.metadata_path, index=False, float_format="%.7f")
LOGGER.info("Wrote %s", str(self.metadata_path))
def calc_tile_coordinates(self):
edrpath = io.get_ground_projection_root()
cubepaths = [edrpath / obsid /
f"{obsid}_mosaic_RED45.cub" for obsid in self.obsids]
todo = []
for cubepath in cubepaths:
tc = TileCalculator(cubepath, read_data=False)
if not tc.campt_results_path.exists():
todo.append(cubepath)
def get_tile_coords(cubepath):
from planet4.projection import TileCalculator
tilecalc = TileCalculator(cubepath)
tilecalc.calc_tile_coords()
if not len(todo) == 0:
results = execute_in_parallel(get_tile_coords, todo)
bucket = []
for cubepath in tqdm(cubepaths):
tc = TileCalculator(cubepath, read_data=False)
bucket.append(tc.tile_coords_df)
coords = pd.concat(bucket, ignore_index=True, sort=False)
coords.to_csv(self.tile_coords_path, index=False, float_format="%.7f")
LOGGER.info("Wrote %s", str(self.tile_coords_path))
@property
def COLS_TO_MERGE(self):
return ['obsid', 'image_x', 'image_y',
'BodyFixedCoordinateX', 'BodyFixedCoordinateY', 'BodyFixedCoordinateZ',
'PlanetocentricLatitude', 'PlanetographicLatitude', 'PositiveEast360Longitude']
def merge_fnotch_results(self, fans, blotches):
"""Average multiple objects from fnotching into one.
        Because fnotching can compare the same object with more than one other object, it can appear
        more than once with different `vote_ratio` values in the results. We merge such duplicates here,
        simply averaging the vote_ratio. This increases the value of the `vote_ratio` number, as it has
        now been created from several comparisons. It only occurs for about 0.5 % of fans, though.
"""
out = []
for df in [fans, blotches]:
averaged = df.groupby('marking_id').mean()
tmp = df.drop_duplicates(
subset='marking_id').set_index('marking_id')
averaged = averaged.join(
tmp[['image_id', 'obsid']], how='inner')
out.append(averaged.reset_index())
return out
def merge_all(self):
# read in data files
fans = pd.read_csv(self.fan_file)
blotches = pd.read_csv(self.blotch_file)
meta = pd.read_csv(self.metadata_path, dtype='str')
tile_coords = pd.read_csv(self.tile_coords_path, dtype='str')
# average multiple fnotch results
fans, blotches = self.merge_fnotch_results(fans, blotches)
# merge meta
cols_to_merge = ['OBSERVATION_ID',
'SOLAR_LONGITUDE', 'north_azimuth', 'map_scale']
fans = fans.merge(meta[cols_to_merge],
left_on='obsid', right_on='OBSERVATION_ID')
blotches = blotches.merge(
meta[cols_to_merge], left_on='obsid', right_on='OBSERVATION_ID')
# drop unnecessary columns
tile_coords.drop(self.DROP_FOR_TILE_COORDS, axis=1, inplace=True)
# save cleaned tile_coords
tile_coords.rename({'image_id': 'tile_id'}, axis=1, inplace=True)
tile_coords.to_csv(self.tile_coords_path_final,
index=False, float_format='%.7f')
# merge campt results into catalog files
fans, blotches = self.merge_campt_results(fans, blotches)
# write out fans catalog
fans.vote_ratio.fillna(1, inplace=True)
fans.version = fans.version.astype('int')
fans.rename(
{'image_id': 'tile_id', 'SOLAR_LONGITUDE': 'l_s',
'PositiveEast360Longitude': 'Longitude'}, axis=1, inplace=True)
fans[self.FAN_COLUMNS_AS_PUBLISHED].to_csv(
self.fan_merged, index=False)
LOGGER.info("Wrote %s", str(self.fan_merged))
# write out blotches catalog
blotches.vote_ratio.fillna(1, inplace=True)
blotches.rename(
{'image_id': 'tile_id', 'SOLAR_LONGITUDE': 'l_s',
'PositiveEast360Longitude': 'Longitude'}, axis=1, inplace=True)
blotches[self.BLOTCH_COLUMNS_AS_PUBLISHED].to_csv(
self.blotch_merged, index=False)
LOGGER.info("Wrote %s", str(self.blotch_merged))
def calc_marking_coordinates(self):
        fans = pd.read_csv(self.fan_file)
# Written by i3s
import os
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import time
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
def proj_l1ball(y, eta):
"""
    Project y onto the L1 ball of radius eta.
    Note that y should preferably be 1D; otherwise the result of the element-wise operations can be unpredictable.
    This function automatically reshapes y to (m,), where m is y.size (i.e. y.shape[0]*y.shape[1] for 2D input).
"""
if type(y) is not np.ndarray:
y = np.array(y)
if y.ndim > 1:
y = np.reshape(y, (-1,))
return np.maximum(
np.absolute(y)
- np.amax(
[
np.amax(
(np.cumsum(np.sort(np.absolute(y), axis=0)[::-1], axis=0) - eta)
/ (np.arange(y.shape[0]) + 1)
),
0,
]
),
0,
) * np.sign(y)
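# Hedged usage sketch: proj_l1ball projects a vector onto the L1 ball of radius
# eta, so the L1 norm of the output is at most eta (exactly eta when the input
# lies outside the ball) and signs are preserved. Values are illustrative.
def _example_proj_l1ball():
    v = np.array([3.0, -1.0, 0.5])
    w = proj_l1ball(v, eta=2.0)
    # w == [2., 0., 0.] and np.sum(np.abs(w)) == 2.0
    return w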
def centroids(XW, Y, k):
Y = np.reshape(Y, -1)
d = XW.shape[1]
mu = np.zeros((k, d))
"""
since in python the index starts from 0 not from 1,
here the Y==i will be change to Y==(i+1)
Or the values in Y need to be changed
"""
for i in range(k):
C = XW[Y == (i + 1), :]
mu[i, :] = np.mean(C, axis=0)
return mu
def class2indicator(y, k):
if len(y.shape) > 1:
# Either throw exception or transform y, here the latter is chosen.
# Note that a list object has no attribute 'flatten()' as np.array do,
# We use x = np.reshape(y,-1) instead of x = y.flatten() in case of
# the type of 'list' of argument y
y = np.reshape(y, -1)
n = len(y)
Y = np.zeros((n, k)) # dtype=float by default
"""
since in python the index starts from 0 not from 1,
here the y==i in matlab will be change to y==(i+1)
"""
for i in range(k):
Y[:, i] = y == (i + 1)
return Y
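# Minimal sketch: class2indicator turns 1-based integer labels into a one-hot
# indicator matrix with k columns. The labels below are illustrative.
def _example_class2indicator():
    y = np.array([1, 2, 2, 1])
    # rows: [[1,0],[0,1],[0,1],[1,0]] (as floats)
    return class2indicator(y, k=2)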
def nb_Genes(w):
# Return the number of selected genes from the matrix (numpy.ndarray) w
d = w.shape[0]
ind_genes = np.zeros((d, 1))
for i in range(d):
if np.linalg.norm(w[i, :]) > 0:
ind_genes[i] = 1
indGene_w = np.where(ind_genes == 1)[0]
nbG = int(np.sum(ind_genes))
return nbG, indGene_w
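# Minimal sketch: nb_Genes counts the features (rows of w) whose row norm is
# non-zero and also returns their indices. The toy matrix is illustrative.
def _example_nb_Genes():
    w = np.array([[0.0, 0.0], [1.5, 0.0], [0.0, -0.2]])
    nbG, ind = nb_Genes(w)
    # nbG == 2 and ind == array([1, 2])
    return nbG, ind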
def select_feature_w(w, featurenames):
k = w.shape[1]
d = w.shape[0]
lst_features = []
lst_norm = []
for i in range(k):
s_tmp = w[:, i] # the i-th column
f_tmp = np.abs(s_tmp) # the absolute values of this column
ind = np.argsort(f_tmp)[
::-1
] # the indices of the sorted abs column (descending order)
f_tmp = np.sort(f_tmp)[::-1] # the sorted abs column (descending order)
nonzero_inds = np.nonzero(f_tmp)[0] # the nonzero indices
lst_f = []
lst_n = []
if len(nonzero_inds) > 0:
nozero_ind = nonzero_inds[-1] # choose the last nonzero index
if nozero_ind == 0:
lst_f.append(featurenames[ind[0]])
lst_n.append(s_tmp[ind[0]])
else:
for j in range(nozero_ind + 1):
lst_f.append(featurenames[ind[j]])
lst_n = s_tmp[ind[0 : (nozero_ind + 1)]]
lst_features.append(lst_f)
lst_norm.append(lst_n)
n_cols_f = len(lst_features)
n_rows_f = max(map(len, lst_features)) # maxmum subset length
n_cols_n = len(lst_norm)
n_rows_n = max(map(len, lst_norm))
for i in range(n_cols_f):
ft = np.array(lst_features[i])
ft.resize(n_rows_f, refcheck=False)
nt = np.array(lst_norm[i])
nt.resize(n_rows_n, refcheck=False)
if i == 0:
features = ft
normW = nt
continue
features = np.vstack((features, ft))
normW = np.vstack((normW, nt))
features = features.T
normW = normW.T
return features, normW
def compute_accuracy(idxR, idx, k):
"""
# ===============================
#----- INPUT
# idxR : real labels
# idx : estimated labels
# k : number of class
#----- OUTPUT
# ACC_glob : global accuracy
# tab_acc : accuracy per class
# ===============================
"""
# Note that Python native sum function works better on list than on numpy.array
# while numpy.sum function works better on numpy.array than on list.
# So it will choose numpy.array as the default type for idxR and idx
if type(idxR) is not np.array:
idxR = np.array(idxR)
if type(idx) is not np.array:
idx = np.array(idx)
if idxR.ndim == 2 and 1 not in idxR.shape:
idxR = np.reshape(idxR, (-1, 1))
if idx.ndim == 1:
idx = np.reshape(idx, idxR.shape)
# Global accuracy
y = np.sum(idxR == idx)
ACC_glob = y / len(idxR)
# Accuracy per class
tab_acc = np.zeros((1, k))
"""
since in python the index starts from 0 not from 1,
here the idx(ind)==j in matlab will be change to idx[ind]==(j+1)
"""
for j in range(k):
ind = np.where(idxR == (j + 1))[0]
if len(ind) == 0:
tab_acc[0, j] = 0.0
else:
tab_acc[0, j] = int(np.sum(idx[ind] == (j + 1))) / len(ind)
return ACC_glob, tab_acc
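# Hedged sketch of compute_accuracy on toy 1-based labels.
def _example_compute_accuracy():
    idxR = np.array([1, 1, 2, 2])  # ground truth
    idx = np.array([1, 2, 2, 2])   # predictions
    ACC_glob, tab_acc = compute_accuracy(idxR, idx, k=2)
    # ACC_glob == 0.75, per-class accuracies == [[0.5, 1.0]]
    return ACC_glob, tab_acc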
def predict_L1(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
# print(distmu)
# sns.kdeplot(np.array(distmu), shade=True, bw=0.1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
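# Hedged sketch tying centroids() and predict_L1() together on toy data: project
# X with a weight matrix W, compute per-class centroids from the training labels,
# then assign new rows to the nearest centroid in L1 distance. W is the identity
# here purely for illustration.
def _example_predict_L1():
    X = np.array([[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 1.1]])
    Y = np.array([1, 1, 2, 2])  # 1-based labels, as used throughout this module
    W = np.eye(2)
    mu = centroids(np.matmul(X, W), Y, k=2)
    # expected labels: [[1.], [2.]]
    return predict_L1(np.array([[0.05, 0.1], [1.05, 0.9]]), W, mu)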
# function to compute the \rho value
def predict_L1_molecule(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
confidence = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
confidence[i] = (distmu[0, 1] - distmu[0, 0]) / (distmu[0, 1] + distmu[0, 0])
return Ytest, confidence
# =============================Plot functions=================================================
# function to plot the distribution of \rho
def rhoHist(rho, n_equal_bins):
"""
# ===============================
#----- INPUT
# rho : df_confidence
# n_equal_bins : the number of histogram bins
#
#----- OUTPUT
# plt.show()
# ===============================
"""
# The leftmost and rightmost bin edges
first_edge, last_edge = rho.min(), rho.max()
bin_edges = np.linspace(
start=first_edge, stop=last_edge, num=n_equal_bins + 1, endpoint=True
)
_ = plt.hist(rho, bins=bin_edges)
plt.title("Histogram of confidence score")
plt.show()
def pd_plot(X, Yr, W, flag=None):
plt.figure()
X_transform = np.dot(X, W)
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_transform[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_transform[index2[0], :]
c2 = np.mean(X_2, axis=0)
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("Primal_Dual")
plt.legend()
plt.show()
def pca_plot(X, Yr, W, flag=None):
plt.figure()
# if flag==True:
# X=np.dot(X,W)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_norm = X_pca
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_norm[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_norm[index2[0], :]
c2 = np.mean(X_2, axis=0)
# plt.scatter(X_2[:,0],X_2[:,8],c='g',label='cluster2')
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("PCA")
plt.legend()
plt.show()
def Predrejection(df_confidence, eps, num_eps):
"""
# =====================================================================
# It calculates the false rate according to the value of epsilon
#
#----- INPUT
# df_confidence : dataframe which contains predicted label,
# original label and rho
# eps : the threshold
# num_eps : the number of epsilon that can be tested
#----- OUTPUT
# FalseRate : An array that contains the falserate according to epsilon
# =====================================================================
"""
Yr = np.array(df_confidence["Yoriginal"])
Yr[np.where(Yr == 2)] = -1
Ypre = np.array(df_confidence["Ypred"])
Ypre[np.where(Ypre == 2)] = -1
rho = df_confidence["rho"]
epsList = np.arange(0, eps, eps / num_eps)
falseRate = []
rejectSample = []
for epsilon in epsList:
index = np.where((-epsilon < rho) & (rho < epsilon))
Yr[index] = 0
Ypre[index] = 0
Ydiff = Yr - Ypre
rejectRate = len(index[0]) / len(Yr)
error = len(np.where(Ydiff != 0)[0]) / len(Yr)
falseRate.append(error)
rejectSample.append(rejectRate)
plt.figure()
plt.plot(epsList, falseRate)
plt.xlabel("Confidence score prediction")
plt.ylabel("FN+FP (ratio)")
# plot the number of rejected samples
plt.figure()
plt.plot(epsList, rejectSample)
plt.xlabel("Confidence score prediction")
plt.ylabel(" Reject samples (ratio) ")
return np.array(falseRate)
# ==============================================================================
def predict_FISTA(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 2)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
def normest(X, tol=1.0e-6, maxiter=100):
# import necessary modules
import scipy.sparse
import numpy as np
import warnings
if scipy.sparse.issparse(X):
x = np.array(np.sum(np.abs(X), axis=0))
x = np.reshape(x, max(x.shape))
elif type(X) == np.matrix:
x = np.sum(np.abs(np.asarray(X)), axis=0)
x = np.reshape(x, max(x.shape))
else:
x = np.sum(np.abs(X), axis=0)
norm_e = np.linalg.norm(x)
if norm_e == 0:
return norm_e
x = x / norm_e
norm_e0 = 0
count = 0
while np.abs(norm_e - norm_e0) > tol * norm_e:
norm_e0 = norm_e
Xx = np.matmul(X, x)
if np.count_nonzero(Xx) == 0:
Xx = np.random.rand(Xx.shape[0])
x = np.matmul(X.T, Xx)
normx = np.linalg.norm(x)
norm_e = normx / np.linalg.norm(Xx)
x = x / normx
count += 1
if count > maxiter:
warnings.warn(
"Normest::NotConverge:the number of iterations exceeds {} times.\nThe error is {}, the tolerance is {}".format(
maxiter, np.abs(norm_e - norm_e0), tol
),
RuntimeWarning,
)
break
return norm_e
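# Minimal sketch: normest estimates the largest singular value (spectral norm) of
# X by power iteration, so its result should match np.linalg.norm(X, 2) to within
# the tolerance. The random matrix is illustrative.
def _example_normest():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    return normest(X), np.linalg.norm(X, 2)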
def merge_topGene_norm(topGenes, normW, clusternames):
"""
# =====================================================================
    # It merges the two outputs of select_feature_w into a new
    # pandas.DataFrame whose columns will be the elements in clusternames
    # and each column will have two subcolumns: topGenes and Weights
    #
    #----- INPUT
    # topGenes : ndarray of top genes chosen by select_feature_w
    # normW : norm of the weight of each gene given by select_feature_w
    # clusternames : A list of the names of each class.
    #----- OUTPUT
    # df_res : A DataFrame where, for each class column, the first subcolumn
    # holds the genes and the second their norm of weight
# =====================================================================
"""
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
lst_col.append((clusternames[i], "Weights"))
df_res = pd.DataFrame(res, columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
return df_res
def merge_topGene_norm_acc(
topGenes,
normW,
clusternames,
acctest,
nbr_features=30,
saveres=False,
file_tag=None,
outputPath="../results/",
):
"""
# =============================================================================================== \n
# Based on the function merge_topGebe_norm, replace the column name for \n
# normW by the accuracy \n
#----- INPUT \n
# topGenes (ndarray or DataFrame) : Top Genes chosen by select_features_w \n
# normW (ndarray or DataFrame) : The normWeight of each genes given by select_features_w \n
# clusternames (list or array) : A list of the names of each class \n
# acctest (list or array) : The list of the test accuracy \n
# saveres (optional, boolean) : True if we want to save the result to local \n
# file_tag (optional, string) : A file tag which will be the prefix of the file name \n
# outputPath (optional, string) : The output Path of the file \n
# ----- OUTPUT \n
# df_res : A DataFrame with each colum the first subcolumn the genes \n
# and second subcolumn their norm of weight \n
# =============================================================================================== \n
"""
if type(topGenes) is pd.DataFrame:
topGenes = topGenes.values
if type(normW) is pd.DataFrame:
normW = normW.values
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
acctest_mean = acctest.values.tolist()[4]
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
astr = str(acctest_mean[i])
lst_col.append((astr, "Weights"))
df_res = pd.DataFrame(res[0:nbr_features, :], columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
if saveres:
df_res.to_csv(
"{}{}_Heatmap of Acc_normW_Topgenes.csv".format(outputPath, file_tag),
sep=";",
)
return df_res
def compare_2topGenes(
topGenes1,
topGenes2,
normW1=None,
normW2=None,
lst_col=None,
nbr_limit=30,
printOut=False,
):
"""
#=======================================================================================
    # Compare, column by column, the elements of the two topGenes; for each
    # column only the first "nbr_limit" elements are checked.
    # The two topGenes should have the same number of columns.
# ----- INPUT
# topGenes1, topGenes2 (DataFrame) : Two topGenes to be compared
    # normW1, normW2 (DataFrame, optional): The two corresponding weight matrices. Default: None
# lst_col (list, optional) : If given, only the chosen column will be compared. Default: None
# nbr_limit (scalar, optional) : Number of the lines to be compared. Default: 30
# printOut (boolean, optional) : If True, the comparison result will be shown on screen. Default: False
# ----- OUTPUT
# out (string) : It returns a string of the comparing result as output.
#=======================================================================================
"""
import pandas as pd
import numpy as np
if type(topGenes1) != type(topGenes2):
raise ValueError("The two topGenes to be compared should be of the same type.")
if type(topGenes1) is not pd.DataFrame:
col = ["C" + str(i) for i in topGenes1.shape[1]]
topGenes1 = pd.DataFrame(topGenes1, columns=col)
topGenes2 = pd.DataFrame(topGenes2, columns=col)
out = []
out.append("Comparing the two TopGenes:\n")
# After the benchmark, the appended list and then converted to whole string seems to be the least consuming
list_name = list(topGenes1.columns)
if lst_col is not None:
list_name = [list_name[ind] for ind in lst_col]
for name in list_name:
out.append(
"{0:{fill}{align}40}\n".format(" Class %s " % name, fill="=", align="^")
)
col_1 = np.array(topGenes1[[name]], dtype=str)
col_2 = np.array(topGenes2[[name]], dtype=str)
# Here np.nozero will return a tuple of 2 array corresponding the first
# and the second dimension while the value of second dimension will
# always be 0. So the first dimension's last location+1 will be the length
# of nonzero arrays and that it's just the location of the first zero
# element
length_nonzero_1 = np.nonzero(col_1)[0][-1] + 1
length_nonzero_2 = np.nonzero(col_2)[0][-1] + 1
# np.nonzero will not detect '0.0' as zero type
if all(col_1 == "0.0"):
length_nonzero_1 = 0
if all(col_2 == "0.0"):
length_nonzero_2 = 0
length_min = min(length_nonzero_1, length_nonzero_2)
# Check if at least one of the classes contains only zero and avoid the error
if length_min == 0 and length_nonzero_1 == length_nonzero_2:
out.append(
"* Warning: No feature is selected for both two class\n Skipped for this class"
)
continue
elif length_min == 0 and length_nonzero_1 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes2\n"
)
out.append(
"* All {} elements are included only in topGenes1:\n".format(
min(length_nonzero_1, nbr_limit)
)
)
for k in range(min(length_nonzero_1, nbr_limit)):
if normW1 is None:
out.append(" (%s)\n" % (str(col_1[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_1[k, 0]), normW1[[name]].iloc[k, 0])
)
continue
elif length_min == 0 and length_nonzero_2 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes1\n"
)
out.append(
"* All {} elements are included only in topGenes2:\n".format(
min(length_nonzero_2, nbr_limit)
)
)
for k in range(min(length_nonzero_2, nbr_limit)):
if normW2 is None:
out.append(" (%s)\n" % (str(col_2[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_2[k, 0]), normW2[[name]].iloc[k, 0])
)
continue
if length_min < nbr_limit:
length = length_min
out.append(
"* Warning: In this column, the 1st topGenes has {} nozero elements\n* while the 2nd one has {} nonzero elements\n".format(
length_nonzero_1, length_nonzero_2
)
)
out.append("* So only first %d elements are compared\n\n" % length_min)
else:
length = nbr_limit
set_1 = col_1[0:length]
set_2 = col_2[0:length]
set_common = np.intersect1d(set_1, set_2) # Have in common
set_o1 = np.setdiff1d(set_1, set_2) # Exclusively in topGenes1
set_o2 = np.setdiff1d(set_2, set_1) # Exclusively in topGenes2
lc = len(set_common)
# print exclusively in topGenes1
out.append(
"Included exclusively in first topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW1 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes1[[name]].isin(set_o1))
for i, j in zip(idx_i, idx_j):
if normW1 is None:
out.append(" (%s)\n" % str(set_1[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_1[i, j]), str(normW1[[name]].iloc[i, j]))
)
out.append("\nNumber of elements in common:{}\n".format(lc))
        # print exclusively in topGenes2
out.append(
"\nIncluded exclusively in second topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW2 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes2[[name]].isin(set_o2))
for i, j in zip(idx_i, idx_j):
if normW2 is None:
out.append(" (%s)\n" % str(set_2[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_2[i, j]), str(normW2[[name]].iloc[i, j]))
)
out.append("{:-<40}\n".format(""))
out = "".join(out)
if printOut == True:
print(out)
return out
def heatmap_classification(
Ytest,
YR,
clusternames,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
# It takes the predicted labels (Ytest), true labels (YR)
# and a list of the names of clusters (clusternames)
# as input and provide the heatmap matrix as the output
#=====================================================
"""
k = len(np.unique(YR)) # If we need to automatically find a k
Heatmap_matrix = np.zeros((k, k))
for i in np.arange(k) + 1:
for j in np.arange(k) + 1:
a = np.where(
Ytest[YR == i] == j, 1, 0
).sum() # number Ytest ==j where YR==i
b = np.where(YR == i, 1, 0).sum()
Heatmap_matrix[i - 1, j - 1] = a / b
# Plotting
if draw_fig == True:
plt.figure(figsize=(10, 6))
annot = False
if k > 10:
annot = False
if clusternames is not None:
axes = sns.heatmap(
Heatmap_matrix,
cmap="jet",
annot=annot,
fmt=".2f",
xticklabels=clusternames,
yticklabels=clusternames,
)
else:
axes = sns.heatmap(Heatmap_matrix, cmap="jet", annot=annot, fmt=".2f")
        axes.set_xlabel("Predicted class", fontsize=14)
        axes.set_ylabel("Ground truth", fontsize=14)
axes.tick_params(labelsize=7)
plt.xticks(rotation=rotate)
axes.set_title("Heatmap of confusion Matrix", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig(
"{}{}_Heatmap_of_confusion_Matrix.png".format(outputPath, func_tag)
)
return Heatmap_matrix
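# Example usage (illustrative sketch, not part of the original pipeline):
# `Ytest_pred` is assumed to be a vector of predicted labels coded 1..k,
# produced by whichever predict function is used upstream.
#
#   H = heatmap_classification(
#       Ytest_pred, YR, ["Class 1", "Class 2", "Class 3"], rotate=60, draw_fig=True
#   )
#   # H[i, j] is the fraction of samples of true class i+1 predicted as class j+1.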
def heatmap_normW(
normW,
clusternames=None,
nbr_l=10,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
    # It takes the weight matrix (normW), the cluster names and the number
    # of top rows to keep (nbr_l) as input and provides, as output, the
    # heatmap matrix of the first nbr_l rows of |normW| normalized by its
    # first row
#=====================================================
"""
A = np.abs(normW)
AN = A / A[0, :]
if normW.shape[0] < nbr_l:
nbr_l = normW.shape[0]
ANR = AN[0:nbr_l, :]
annot = False
if draw_fig == True:
plt.figure(figsize=(10, 6))
# axes2=sns.heatmap(ANR,cmap='jet',annot=annot,fmt='.3f')
if clusternames is None:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
else:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
xticklabels=clusternames,
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
plt.xticks(rotation=rotate)
axes2.set_ylabel("Features", fontsize=14)
axes2.set_xlabel("Clusters", fontsize=14)
axes2.tick_params(labelsize=7)
axes2.set_title("Heatmap of Matrix W", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig("{}{}_Heatmap_of_signature.png".format(outputPath, func_tag))
return ANR
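# Example usage (illustrative sketch): normW is assumed to be the weight matrix
# returned by select_feature_w, with rows sorted by decreasing norm.
#
#   ANR = heatmap_normW(normW, clusternames=["Class 1", "Class 2"], nbr_l=30, draw_fig=True)
#   # ANR holds the first 30 rows of |normW| scaled column-wise by the first row.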
def drop_cells_with_ID(X, Y, ID, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
    # X_new, Y_new, ID_new : The new data, the new label and the new IDs
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y, ID
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)  # without replacement, so exactly n_diff rows are removed
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
ID_new = np.delete(ID, lst_del, 0)
return X_new, Y_new, ID_new
def drop_cells(X, Y, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
# X_new, Y_new : The new data and the new label
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)  # without replacement, so exactly n_diff rows are removed
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
return X_new, Y_new
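# Example (illustrative): with 103 samples and n_fold=4, the 3 extra rows are
# removed at random from the largest class so that every fold has equal size.
#
#   X_new, Y_new = drop_cells(X, Y, n_fold=4)
#   assert X_new.shape[0] % 4 == 0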
# ===================== Algorithms =======================================
def FISTA_Primal(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
# niter : The number of iterations
# gamma : The hyper parameter gamma
# eta : The eta to calculate the projection on l1 ball
# * isEpsilon is not used in the original file in Matlab
# --- Output
# w : The projection matrix
# mu : The centers
# nbGenes_fin : The number of genes of the final step
# loss : The loss for each iteration
# ====================================================================
"""
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = ["niter", "eta", "gamma"] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
gamma = param["gamma"]
n, d = X.shape
# === With class2indicator():
# Y = class2indicator(YR,k)
# === With Onehotencoder:
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
loss = np.zeros(niter)
XtX = np.matmul(X.T, X)
XtY = np.matmul(X.T, Y)
w_old = np.ones((d, k))
w_loc = w_old
t_old = 1
for i in range(niter):
grad_w = np.matmul(XtX, w_loc) - XtY
# gradient step
V = w_loc - gamma * grad_w
V = np.reshape(V, d * k)
# Projection on the l1 ball
V = proj_l1ball(V, eta)
# Reshape back
w_new = np.reshape(V, (d, k))
# Chambolle method
        t_new = (i + 6) / 4  # or i+6 since python starts from 0 ?
w_loc_new = w_new + ((t_old - 1) / t_new) * (w_new - w_old)
w_old = w_new
w_loc = w_loc_new
t_old = t_new
loss[i] = np.linalg.norm(Y - np.matmul(X, w_loc), "fro") ** 2
    # end iterations
w = w_loc
mu = centroids(np.matmul(X, w), YR, k)
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss
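# Example usage (illustrative sketch): the step size gamma below is an
# assumption (a common 1 / ||X||_2^2 heuristic), not a value prescribed by the
# original code; tune it together with eta.
#
#   param = {"niter": 30, "eta": 500, "gamma": 1.0 / np.linalg.norm(X, 2) ** 2}
#   w, mu, nbGenes_fin, loss = FISTA_Primal(X, YR.reshape(-1, 1), k, param)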
def primal_dual_L1N(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
    # param : A dict parameter which must have keys:
# 'niter', 'eta', 'tau', 'rho','sigma', 'beta', 'tau2' and 'delta'
# Normally speaking:
# (The default value for beta is 0.25.)
# (IF not given, the value of the 'tau2' will be calculated by
# tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
# the 2-norm of the OneHotEncode of the YR given.)
# (Default value of the 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# normY = np.linalg.norm(Y,2)
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = [
"niter",
"eta",
"tau",
"rho",
"sigma",
"delta",
"tau2",
"beta",
] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
delta = param["delta"]
tau2 = param["tau2"]
# beta = param['beta']
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Reshape
V = np.reshape(V, d * k)
V = proj_l1ball(V, eta)
V[np.where(np.abs(V) < 0.001)] = 0
# Reshape back
w_new = np.reshape(V, (d, k))
# no gamma here
# w_new = w_new + gamma*(w_new - w_old) =>
w = 2 * w_new - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
# mu = mu_new + gamma*(mu_new - mu_old) =>
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w_new
Z_old = Z_new
loss[i] = np.linalg.norm(
np.matmul(Y, mu_new) - np.matmul(X, w_new), 1
) + 0.5 * (np.linalg.norm(Ik - mu_new, "fro") ** 2)
# End loop
Z = Z_old
w = w_new
mu = mu_new
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
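# Example usage (illustrative sketch): in practice the parameter dict is built
# by basic_run_eta (see Part 2); the values below are placeholders only.
#
#   param = {"niter": 30, "eta": 500, "tau": 4, "rho": 1, "sigma": 0.1,
#            "delta": 1.0, "tau2": 0.05, "beta": 0.25}
#   w, mu, nbGenes_fin, loss, Z = primal_dual_L1N(X, YR.reshape(-1, 1), k, param)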
def primal_dual_Nuclear(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
    # param : A dict parameter which must have keys:
    # 'niter', 'eta_star', 'tau', 'rho', 'sigma', 'tau2' and 'delta'
# Normally speaking:
# (The default value for beta is 0.25.)
# (IF not given, the value of the 'tau2' will be calculated by
# tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
# the 2-norm of the OneHotEncode of the YR given.)
# (Default value of the 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
    lst_params = [
        "niter",
        "eta_star",
        "tau",
        "rho",
        "sigma",
        "tau2",
        "delta",
    ]  # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta_star = param["eta_star"]
delta = param["delta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
tau2 = param["tau2"]
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Nuclear constraint
L, S0, R = np.linalg.svd(V, full_matrices=False)
norm_nuclear = S0.sum()
vs1 = proj_l1ball(S0.reshape((-1,)), eta_star)
S1 = vs1.reshape(S0.shape)
w = np.matmul(L, S1[..., None] * R)
w = 2 * w - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w
Z_old = Z_new
loss[i] = np.linalg.norm(np.matmul(Y, mu_new) - np.matmul(X, w), 1) + 0.5 * (
np.linalg.norm(Ik - mu_new, "fro") ** 2
)
# End loop
Z = Z_old
mu = mu_new
nbGenes_fin, _ = nb_Genes(w)
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
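# Example usage (illustrative sketch): same calling convention as
# primal_dual_L1N, except that the sparsity budget eta_star constrains the
# singular values of w instead of its entries; the values below are placeholders.
#
#   param = {"niter": 30, "eta_star": 200, "tau": 4, "rho": 1, "sigma": 0.1,
#            "delta": 1.0, "tau2": 0.05, "beta": 0.25}
#   w, mu, nbGenes_fin, loss, Z = primal_dual_Nuclear(X, YR.reshape(-1, 1), k, param)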
# ================================== Part 2 ====================================
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta(
func_algo,
func_predict,
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=True,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional) : The seed to control the random function
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
# - outputPath (optional) : String value. The output path.
#
# - Output:
# - mu : The centroids
# - nbm : Number of genes
# - accG : Global accuracy
# - loss : Loss for each iterations
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
if YR.ndim == 1: # In case that OneHotEncoder get 1D array and raise a TypeError
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Dropping the cells randomly if the n%d is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
nbG = np.zeros(nfold)
# Parameters printing
    print("\nStarting training for")
print("{:>6}:{:<6}".format("niter", niter))
if "fista" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
        print("{:>6}:{:<6}".format("gamma", gamma))
elif "or" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
        print("{:>6}:{:<6}".format("gamma", gamma))
elif "_l2" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
elif "nuclear" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta_star", eta_star))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
else:
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(func_algo.__name__))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
print("{:-<30}".format(""))
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
print("Training step ends.\n")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
# Feature selection
print("Selecting features from whole dataset...", end="")
w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
topGenes, normW = select_feature_w(w, genenames)
topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
# Mean of each fold
df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
# All data
df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
df_normW = pd.DataFrame(normW, columns=clusternames)
df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
print("Completed.\n")
# Two heatmaps
M_heatmap_classification = heatmap_classification(
Y_PDS, YR, clusternames, rotate=60
)
M_heatmap_signature = heatmap_normW(normW, clusternames, nbr_l=30, rotate=60)
# Results
if showres == True:
print("Size class (real):")
print(df_szclass)
print("\nSize class (estimated):")
print(df_szclass_est)
print("\nAccuracy Train")
print(df_accTrain)
print("\nAccuracy Test")
print(df_acctest)
if keepfig == False:
plt.close("all")
fig_lossIter = plt.figure(figsize=(8, 6))
plt.plot(np.arange(niter, dtype=int) + 1, loss)
        msg_eta = r"$\eta$:%d" % eta if eta is not None else ""
        msg_etaS = r"$\eta*$:%d" % eta_star if eta_star is not None else ""
plt.title(
"loss for each iteration {} {}\n ({})".format(
msg_eta, msg_etaS, func_algo.__name__
),
fontsize=18,
)
plt.ylabel("Loss", fontsize=18)
plt.xlabel("Iteration", fontsize=18)
plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
plt.xlim(left=1, right=niter)
plt.ylim((0, 1))
# Saving Result
if saveres == True:
# define two nametags
nametag_eta = "_eta-%d" % eta if eta is not None else ""
nametag_etaS = "_etaStar-%d" % eta_star if eta_star is not None else ""
# save loss
filename_loss = "loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt".format(
func_algo.__name__, beta, delta, nametag_eta, nametag_etaS, niter
)
np.savetxt(outputPath + filename_loss, loss)
# define function name tag for two heatmaps
func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
# Save heatmaps
filename_heat = "{}{}_Heatmap_of_confusion_Matrix.npy".format(
outputPath, func_tag
)
np.save(filename_heat, M_heatmap_classification)
filename_heat = "{}{}_Heatmap_of_signature_Matrix.npy".format(
outputPath, func_tag
)
np.save(filename_heat, M_heatmap_signature)
df_acctest.to_csv(
"{}{}{}{}_AccuracyTest.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
df_topG_normW.to_csv(
"{}{}{}{}_TopGenesAndNormW.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
        # Other possibilities to save
# fig_lossIter.savefig('{}{}{}{}_niter-{}_loss_iters.png'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS,niter))
# All data
# df_topGenes.to_csv('{}{}_TopGenes.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW.to_csv('{}{}_NormW.csv'.format(outputPath,func_algo.__name__),sep=';')
# Mean of each fold
# df_topGenes_mean.to_csv('{}{}_TopGenes_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW_mean.to_csv('{}{}_NormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_topG_normW_mean.to_csv('{}{}_TopGenesAndNormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
return (
mu_mean,
nbm,
accG,
loss,
W_mean,
timeElapsed,
df_topGenes,
df_normW,
df_topG_normW,
df_topGenes_mean,
df_normW_mean,
df_topG_normW_mean,
df_acctest,
w_all,
)
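# Example usage (illustrative sketch): run the primal-dual L1 algorithm with
# 4-fold cross validation; predict_L1 is the matching prediction function
# defined elsewhere in this module.
#
#   res = basic_run_eta(primal_dual_L1N, predict_L1, X, YR, k,
#                       niter=30, eta=500, nfold=4, showres=False)
#   mu_mean, nbm, accG = res[0], res[1], res[2]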
# ===================== ========================================================
def getPredLabel(Ypred):
for i in range(Ypred.shape[0]):
if Ypred[i] > 1.5:
Ypred[i] = 2
if Ypred[i] <= 1.5:
Ypred[i] = 1
return Ypred
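# Example (illustrative): getPredLabel thresholds continuous binary predictions
# at 1.5 and maps them back to the labels {1, 2} used in this module.
#
#   getPredLabel(np.array([1.2, 1.7, 0.9]))  # -> array([1., 2., 1.])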
# =====================Functions used to compare different algorithms========================================================
def getCoefs(alg, model):
if alg == "RF":
coef = model.feature_importances_
if alg == "svm":
coef = model.coef_.transpose()
if alg == "plsda":
coef = model.coef_
return coef
# =====================Functions used to compute the ranked features and their weights=======================
def TopGenbinary(w, feature_names):
n = len(w)
difference = np.zeros(n)
for i in range(n):
difference[i] = w[i][0] - w[i][1]
df1 = pd.DataFrame(feature_names, columns=["pd"])
df1["weights"] = difference
# =====Sort the difference based on the absolute value=========
df1["sort_helper"] = df1["weights"].abs()
df2 = df1.sort_values(by="sort_helper", ascending=False).drop("sort_helper", axis=1)
# ==== end_sort=============
return df2
def rankFeatureHelper(alg, coef, feature_names):
df1 = pd.DataFrame(feature_names, columns=[alg])
df1["weights"] = coef
df1["sort_helper"] = df1["weights"].abs()
df2 = df1.sort_values(by="sort_helper", ascending=False).drop("sort_helper", axis=1)
return df2
def rankFeatures(X, Yr, algList, feature_names):
# flag=0
featureList = []
for alg in algList:
if alg == "svm":
clf = SVC(probability=True, kernel="linear")
model = clf.fit(X, Yr.ravel())
coef = model.coef_.transpose()
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
if alg == "RF":
clf = RandomForestClassifier(n_estimators=400, random_state=10, max_depth=3)
model = clf.fit(X, Yr.ravel())
coef = model.feature_importances_
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
model = clf.fit(X, Yr.ravel())
coef = model.coef_
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
# if flag == 0:
# df_rankFeature = TopGenbinary(coef, feature_names)
# flag =1
# else:
# df_feature = TopGenbinary(coef, feature_names)
# df_rankFeature
return featureList
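# Example usage (illustrative sketch): rank features with several reference
# classifiers; each entry of the returned list is a DataFrame sorted by the
# absolute weight given by the corresponding model.
#
#   featureList = rankFeatures(X, Yr, ["svm", "RF", "plsda"], feature_names)
#   featureList[0].head(10)  # top 10 features according to the linear SVM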
# ===============================Compute the \rho==============================
def basic_run_eta_molecule(
X,
YR,
ID,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=500,
gamma=1,
nfold=4,
random_seed=1,
):
"""
# =====================================================================
# This function is used to compute the df_confidence
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# The function of the algorithm: primal_dual_L1N
# The function to predict: predict_L1_molecule
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional) : The seed to control the random function
#
# - Output:
# - Yprediction : list of Predicted labels
# ======================================================================
"""
np.random.seed(random_seed) # reproducible
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
if YR.ndim == 1: # In case that OneHotEncoder get 1D array and raise a TypeError
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Dropping the cells randomly if the n%d is not zero
# See more details in drop_cells
X, YR, Ident = drop_cells_with_ID(X, YR, ID, nfold)
dico = dict(list(enumerate(Ident)))
ref = pd.DataFrame.from_dict(dico, orient="index")
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
nbG = np.zeros(nfold)
# Parameters printing
    print("\nStarting training for")
print("{:>6}:{:<6}".format("niter", niter))
print("{:>6}:{:<6}".format("eta", eta))
if "fista" in primal_dual_L1N.__name__.lower():
        print("{:>6}:{:<6}".format("gamma", gamma))
elif "or" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
        print("{:>6}:{:<6}".format("gamma", gamma))
elif "_l2" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
else:
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Yprediction = []
Confidence = []
# accuracy_train = np.zeros((nfold,k+1))
# accuracy_test = np.zeros((nfold,k+1))
ID = []
Ident = []
kf = KFold(n_splits=nfold, random_state=random_seed, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = primal_dual_L1N(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(primal_dual_L1N.__name__))
# ========== Training =========
dico = dico
Xtrain = X[train_ind]
Ytrain = YR[train_ind]
Xtest = X[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = primal_dual_L1N(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
loss_iter0[i, :] = loss
# ========== Prediction =========
Ypred, conf = predict_L1_molecule(Xtest, w, mu)
Yprediction.append(Ypred)
Confidence.append(conf)
ID.append(test_ind)
Ident.append(ref.iloc[test_ind])
nbG[i] = nbGenes
print("{:-<30}".format(""))
# end kfold loop
return Yprediction, Confidence, ID, Ident, YR, ref
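# Example usage (illustrative sketch): ID is assumed to be an array of sample
# identifiers aligned with the rows of X; the function returns per-fold
# predictions and confidences for molecule-level analysis.
#
#   Ypred, conf, idx, ident, YR_kept, ref = basic_run_eta_molecule(
#       X, YR, ID, k, eta=500, nfold=4
#   )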
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta_compare(
func_algo,
func_predict,
X,
YR,
k,
alglist,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=False,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional) : The seed to control the random function
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
    # - alglist (necessary) : The list of the names of the algorithms to compare
#
# - outputPath (optional) : String value. The output path.
#
#
# - Output:
# - mu : The centroids
# - nbm : Number of genes
# - accG : Global accuracy
# - loss : Loss for each iterations
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
if YR.ndim == 1: # In case that OneHotEncoder get 1D array and raise a TypeError
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Dropping the cells randomly if the n%d is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
auc_train = np.zeros((nfold))
auc_test = np.zeros((nfold))
sil_train = np.zeros((nfold))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
nbG = np.zeros(nfold)
# Parameters printing
# print('\nStarts trainning for')
# print('{:>6}:{:<6}'.format('niter',niter))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
numalg = len(alglist)
accuracy_train_comp = np.zeros((nfold, numalg))
accuracy_test_comp = np.zeros((nfold, numalg))
AUC_train_comp = np.zeros((nfold, numalg * 4))
AUC_test_comp = np.zeros((nfold, numalg * 4))
timeElapsedMatrix = np.zeros((nfold, numalg + 1))
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
    # n-fold cross validation
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
Ytr = pd.get_dummies(Ytrain.ravel()).values.T.T
Yte = pd.get_dummies(Ytest.ravel())
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
timeElapsedMatrix[i][numalg] = timeElapsed
print("-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ytest_pred.astype("int64")).shape[0] == 2
):
auc_test[i] = roc_auc_score(Ytest_pred.astype("int64"), Ytest)
auc_train[i] = roc_auc_score(Ytrain_pred.astype("int64"), Ytrain)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
# start loop of other algorithms' comparison
for j in range(numalg):
alg = alglist[j]
if alg == "svm":
tuned_parameters = [
{"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
clf = GridSearchCV(SVC(), tuned_parameters)
# clf = SVC(probability=True,kernel='linear')
if alg == "RF":
clf = RandomForestClassifier(
n_estimators=400, random_state=10, max_depth=3
)
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
# build the model
startTime = time.perf_counter()
# clf = OneVsRestClassifier(clf)
model = clf.fit(Xtrain, Ytrain.ravel())
# model = clf.fit(X,Ytr)
# if (alg == 'svm'):
# print(clf.best_params_)
endTime = time.perf_counter()
timeElapsedMatrix[i][j] = endTime - startTime
if k > 2:
Ypred_test = np.around(
model.predict(Xtest)
).ravel() # getPredLabel(model.predict(Xtest))
Ypred_train = np.around(
model.predict(Xtrain)
).ravel() # getPredLabel(model.predict(Xtrain))
else:
Ypred_test = getPredLabel(model.predict(Xtest))
Ypred_train = getPredLabel(model.predict(Xtrain))
accuracy_test_comp[i][j] = accuracy_score(Ypred_test.astype("int64"), Ytest)
accuracy_train_comp[i][j] = accuracy_score(
Ypred_train.astype("int64"), Ytrain
)
# print("sil = ", metrics.silhouette_score(model.x_scores_, Ypred_train) )
if alg == "plsda":
sil_train[i] = metrics.silhouette_score(model.x_scores_, Ypred_train)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ypred_test.astype("int64")).shape[0] == 2
):
AUC_test_comp[i][j * 4] = roc_auc_score(
Ypred_test.astype("int64"), Ytest
)
AUC_train_comp[i][j * 4] = roc_auc_score(
Ypred_train.astype("int64"), Ytrain
)
# F1 precision recal
AUC_train_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytrain, Ypred_train.astype("int64"), average="macro"
)[
:-1
]
AUC_test_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytest, Ypred_test.astype("int64"), average="macro"
)[
:-1
]
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
# auc_train = np.vstack((auc_train,np.mean(auc_train,axis=0)))
# auc_test = np.vstack((auc_test,np.mean(auc_test,axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
# Data accuracy1
ind_df_comp = []
for i_fold in range(nfold):
ind_df_comp.append("Fold {}".format(i_fold + 1))
df_comp = pd.DataFrame(accuracy_test_comp, index=ind_df_comp, columns=alglist)
df_comp.loc["Mean"] = df_comp.mean()
df_comp["pd"] = df_acctest["Global"]
colauc = []
for met in alglist:
colauc.append(met + " AUC")
colauc.append(met + " Precision")
colauc.append(met + " Recall")
colauc.append(met + " F1 score")
df_compauc = pd.DataFrame(AUC_test_comp, index=ind_df_comp, columns=colauc)
df_compauc["pd"] = auc_test
df_compauc["sil_plsda"] = sil_train
df_compauc.loc["Mean"] = df_compauc.mean()
alglen = len(alglist)
alglist1 = []
for i in range(alglen):
alglist1.append(alglist[i])
alglist1.append("pd")
df_timeElapsed = pd.DataFrame(
timeElapsedMatrix, index=ind_df_comp, columns=alglist1
)
df_timeElapsed.loc["Mean"] = df_timeElapsed.mean()
# Feature selection
print("Selecting features from whole dataset...", end="")
w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
topGenes, normW = select_feature_w(w, genenames)
topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
# Mean of each fold
df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
# All data
df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
df_normW = pd.DataFrame(normW, columns=clusternames)
df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
print("Completed.\n")
# Two heatmaps
# M_heatmap_classification = heatmap_classification(Y_PDS,YR,clusternames,rotate=60)
# M_heatmap_signature = heatmap_normW(normW,clusternames,nbr_l=30,rotate=60)
# Results
if showres == True:
print("Size class (real):")
print(df_szclass)
print("\nSize class (estimated):")
print(df_szclass_est)
print("\nAccuracy Train")
print(df_accTrain)
print("\nAccuracy Test")
print(df_acctest)
if keepfig == False:
plt.close("all")
fig_lossIter = plt.figure(figsize=(8, 6))
plt.plot(np.arange(niter, dtype=int) + 1, loss)
        msg_eta = r"$\eta$:%d" % eta if eta is not None else ""
        msg_etaS = r"$\eta*$:%d" % eta_star if eta_star is not None else ""
plt.title(
"loss for each iteration {} {}\n ({})".format(
msg_eta, msg_etaS, func_algo.__name__
),
fontsize=18,
)
plt.ylabel("Loss", fontsize=18)
plt.xlabel("Iteration", fontsize=18)
plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
plt.xlim(left=1, right=niter)
plt.ylim((0, 1))
# Saving Result
if saveres == True:
# define two nametags
nametag_eta = "_eta-%d" % eta if eta is not None else ""
nametag_etaS = "_etaStar-%d" % eta_star if eta_star is not None else ""
# save loss
# filename_loss = 'loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt'.format(func_algo.__name__,beta,delta, nametag_eta,nametag_etaS,niter)
# np.savetxt(outputPath + filename_loss,loss)
# define function name tag for two heatmaps
# func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
# Save heatmaps
# filename_heat = '{}{}_Heatmap_of_confusion_Matrix.npy'.format(outputPath,func_tag)
# np.save(filename_heat,M_heatmap_classification)
# filename_heat = '{}{}_Heatmap_of_signature_Matrix.npy'.format(outputPath,func_tag)
# np.save(filename_heat,M_heatmap_signature)
df_acctest.to_csv(
"{}{}{}{}_AccuracyTest.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
df_topG_normW.to_csv(
"{}{}{}{}_TopGenesAndNormW.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
        # Other possibilities to save
# fig_lossIter.savefig('{}{}{}{}_niter-{}_loss_iters.png'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS,niter))
# All data
# df_topGenes.to_csv('{}{}_TopGenes.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW.to_csv('{}{}_NormW.csv'.format(outputPath,func_algo.__name__),sep=';')
# Mean of each fold
# df_topGenes_mean.to_csv('{}{}_TopGenes_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW_mean.to_csv('{}{}_NormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_topG_normW_mean.to_csv('{}{}_TopGenesAndNormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
return (
mu_mean,
nbm,
accG,
loss,
W_mean,
timeElapsed,
df_topGenes,
df_normW,
df_topG_normW,
df_topGenes_mean,
df_normW_mean,
df_topG_normW_mean,
df_acctest,
df_comp,
df_timeElapsed,
w_all,
df_compauc,
)
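# Example usage (illustrative sketch): same interface as basic_run_eta plus the
# list of competitor algorithms to benchmark on the same folds.
#
#   res = basic_run_eta_compare(primal_dual_L1N, predict_L1, X, YR, k,
#                               ["svm", "RF", "plsda"], eta=500, nfold=4)
#   df_comp, df_timeElapsed = res[13], res[14]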
import warnings
def readData(filename):
DATADIR = "datas/"
# df_X = pd.read_csv(DATADIR+'LUNG.csv',delimiter=';', decimal=",",header=0,encoding="ISO-8859-1", low_memory=False)
df_X = pd.read_csv(
DATADIR + str(filename),
delimiter=";",
decimal=",",
header=0,
encoding="ISO-8859-1",
low_memory=False,
)
# df_X = pd.read_csv(DATADIR+'COVID.csv',delimiter=';', decimal=",",header=0,encoding="ISO-8859-1", low_memory=False)
df_names = df_X["Name"]
feature_names = df_names[1:].values.astype(str)
X = df_X.iloc[1:, 1:].values.astype(float).transpose()
Yr = df_X.iloc[0, 1:].values.astype(float)
nbr_clusters = len(np.unique(Yr))
feature_names = df_names.values.astype(str)[1:]
label_name = df_names
for index, label in enumerate(
label_name
): # convert string labels to numero (0,1,2....)
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
Yr = np.where(Yr == label, index, Yr)
Yr = Yr.astype(np.int64)
return X, Yr, nbr_clusters, feature_names
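# Example usage (illustrative sketch): readData expects a CSV in datas/ whose
# first column "Name" holds the label row followed by the feature names, with
# one column per sample, ';' as separator and ',' as decimal mark.
#
#   X, Yr, nbr_clusters, feature_names = readData("LUNG.csv")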
def basic_run_other(
X,
YR,
k,
alglist,
genenames=None,
clusternames=None,
nfold=4,
rng=6,
doTopGenes=False,
):
np.random.seed(rng) # reproducible
n, d = X.shape # n is never used
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
if YR.ndim == 1: # In case that OneHotEncoder get 1D array and raise a TypeError
YR = YR.reshape(-1, 1)
# Dropping the cells randomly if the n%d is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
# Initialization
sil_train = np.zeros((nfold))
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
numalg = len(alglist)
accuracy_train_comp = np.zeros((nfold, numalg))
accuracy_test_comp = np.zeros((nfold, numalg))
AUC_train_comp = np.zeros((nfold, numalg * 4))
AUC_test_comp = np.zeros((nfold, numalg * 4))
timeElapsedMatrix = np.zeros((nfold, numalg))
top_features_list = []
    # n-fold cross validation
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
# start loop of other algorithms' comparison
for j, alg in enumerate(alglist):
get_features = lambda m: None
if alg == "svm":
tuned_parameters = [
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
clf = GridSearchCV(SVC(), tuned_parameters)
get_features = lambda m: m.best_estimator_.coef_.transpose()
if alg == "RF":
clf = RandomForestClassifier(
n_estimators=400, random_state=10, max_depth=3
)
get_features = lambda m: m.feature_importances_
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
get_features = lambda m: m.coef_
if alg == "logreg":
clf = LogisticRegression(C=10)
get_features = lambda m: m.coef_.transpose()
if alg == "NN":
clf = KNeighborsClassifier(n_neighbors=50)
if alg == "GaussianNB":
clf = GaussianNB(var_smoothing=1e-9) # var smoothing to be tuned
if alg == "Adaboost":
clf = AdaBoostClassifier(n_estimators=100) # parameters to be tuned
get_features = lambda m: m.feature_importances_
if alg == "Lasso":
lasso = Lasso(random_state=0, max_iter=10000)
alphas = np.logspace(-4, -0.5, 20)
tuned_parameters = [{"alpha": alphas}]
n_folds = 5
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds)
get_features = lambda m: m.best_estimator_.coef_
# build the model
startTime = time.perf_counter()
model = clf.fit(Xtrain, Ytrain.ravel())
endTime = time.perf_counter()
timeElapsedMatrix[i][j] = endTime - startTime
if k > 2:
Ypred_test = np.around(
model.predict(Xtest)
).ravel() # getPredLabel(model.predict(Xtest))
Ypred_train = np.around(
model.predict(Xtrain)
).ravel() # getPredLabel(model.predict(Xtrain))
else:
Ypred_test = getPredLabel(model.predict(Xtest)).ravel()
Ypred_train = getPredLabel(model.predict(Xtrain)).ravel()
accuracy_test_comp[i][j] = accuracy_score(Ypred_test.astype("int64"), Ytest)
accuracy_train_comp[i][j] = accuracy_score(
Ypred_train.astype("int64"), Ytrain
)
if alg == "plsda":
sil_train[i] = metrics.silhouette_score(model.x_scores_, Ypred_train)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ypred_test.astype("int64")).shape[0] == 2
):
AUC_test_comp[i][j * 4] = roc_auc_score(
Ypred_test.astype("int64"), Ytest
)
AUC_train_comp[i][j * 4] = roc_auc_score(
Ypred_train.astype("int64"), Ytrain
)
# F1 precision recall
# Note: for some models, these are not defined
# (for example, the Lasso)
# In those cases, the undefined scores are set to 0,
# And no warning is raised
# Cf. the zero_division=0 parameter.
AUC_train_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytrain, Ypred_train.astype("int64"), average="macro", zero_division=0
)[
:-1
]
AUC_test_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytest, Ypred_test.astype("int64"), average="macro", zero_division=0
)[
:-1
]
# get the topgenes from the first fold
if i == 0 and doTopGenes:
coef = get_features(clf)
if coef is not None:
df_rankFeature = rankFeatureHelper(alg, coef, genenames)
else:
df_rankFeature = rankFeatureHelper(
alg, [0] * len(genenames), genenames
)
top_features_list.append(df_rankFeature)
# Data accuracy1
ind_df_comp = []
for i_fold in range(nfold):
ind_df_comp.append("Fold {}".format(i_fold + 1))
df_comp = pd.DataFrame(accuracy_test_comp, index=ind_df_comp, columns=alglist)
df_comp.loc["Mean"] = df_comp.mean()
colauc = []
for met in alglist:
colauc.append(met + " AUC")
colauc.append(met + " Precision")
colauc.append(met + " Recall")
colauc.append(met + " F1 score")
df_compauc = pd.DataFrame(AUC_test_comp, index=ind_df_comp, columns=colauc)
df_compauc["sil_plsda"] = sil_train
df_compauc.loc["Mean"] = df_compauc.mean()
alglen = len(alglist)
alglist1 = []
for i in range(alglen):
alglist1.append(alglist[i])
df_timeElapsed = pd.DataFrame(
timeElapsedMatrix, index=ind_df_comp, columns=alglist1
)
df_timeElapsed.loc["Mean"] = df_timeElapsed.mean()
return df_comp, df_timeElapsed, df_compauc, top_features_list
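# Example usage (illustrative sketch): compare several standard classifiers on
# the same folds; doTopGenes=True also returns their feature rankings computed
# on the first fold.
#
#   df_comp, df_time, df_auc, top_feats = basic_run_other(
#       X, YR, k, ["svm", "RF", "logreg"], genenames=feature_names, doTopGenes=True
#   )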
def basic_run_tabeta(
func_algo,
func_predict,
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=1.5,
beta=0.25,
delta=1.0,
tabeta=[100, 200, 400],
gamma=1,
nfold=4,
rng=1,
showres=True,
saveres=False,
keepfig=False,
outputPath="../results/",
):
"""
    It shares the same input as basic_run_eta except that eta is replaced by
    tabeta, a list of eta values. It returns all the outputs of basic_run_eta
    plus 5 additional ones:
    nbm_etas, accG_etas, loss_iter, W_mean_etas, timeElapsed_etas
    Note : For now the function saves the output, shows the figures and saves
    the results only for the last experiment, i.e. only for the last eta.
    This mechanism will be changed in a future update.
"""
n_etas = len(tabeta)
n, d = X.shape
# W_mean_etas stores w for each eta, where w is the mean of W0 along its third axis
W_mean_etas = np.zeros((d, k, n_etas))
loss_iter = np.zeros((n_etas, niter)) # loss for each iteration of each eta
nbm_etas = np.zeros(n_etas, dtype=int)
accG_etas = np.zeros(n_etas)
timeElapsed_etas = np.zeros(n_etas)
for i, eta in enumerate(tabeta):
if i == (n_etas - 1):
(
mu,
nbm,
accG,
loss,
W_mean,
timeElapsed,
topGenes,
normW,
topGenes_normW,
topGenes_mean,
normW_mean,
topGenes_normW_mean,
acctest,
) = basic_run_eta(
func_algo,
func_predict,
X,
YR,
k,
genenames,
clusternames,
eta=eta,
niter=niter,
rho=rho,
tau=tau,
beta=beta,
delta=delta,
gamma=gamma,
nfold=nfold,
rng=rng,
showres=True,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
else:
nbm, accG, loss, W_mean, timeElapsed = basic_run_eta(
func_algo,
func_predict,
X,
YR,
k,
genenames,
clusternames,
eta=eta,
niter=niter,
rho=rho,
tau=tau,
beta=beta,
delta=delta,
gamma=gamma,
nfold=nfold,
rng=rng,
showres=False,
saveres=False,
outputPath=outputPath,
)[1:6]
nbm_etas[i] = nbm
accG_etas[i] = accG
loss_iter[i, :] = loss
W_mean_etas[:, :, i] = W_mean
timeElapsed_etas[i] = timeElapsed
if showres == True:
file_tag = func_algo.__name__
fig_avn = plt.figure(figsize=(8, 6))
plt.plot(nbm_etas, accG_etas, "bo-", linewidth=3)
plt.title(
"Figure: Accuracy VS Number of genes \n({})".format(file_tag), fontsize=16
)
plt.ylabel("Accuracy", fontsize=16)
plt.xlabel("Number of genes", fontsize=16)
plt.xlim([min(nbm_etas), max(nbm_etas)])
# if saveres == True:
# fig_avn.savefig('{}{}_AccVSNbG.png'.format(outputPath,file_tag))
nbm_etas = pd.DataFrame(nbm_etas, index=tabeta)
accG_etas = pd.DataFrame(accG_etas, index=tabeta)
loss_iter = pd.DataFrame(
loss_iter,
index=tabeta,
columns=np.linspace(1, niter, niter, endpoint=True, dtype=int),
).transpose()
timeElapsed_etas = pd.DataFrame(timeElapsed_etas, index=tabeta)
if saveres:
nbm_etas.to_csv(
"{}{}_Num_Features.csv".format(outputPath, func_algo.__name__), sep=";"
)
accG_etas.to_csv(
"{}{}_Acc_tabEta.csv".format(outputPath, func_algo.__name__), sep=";"
)
return (
mu,
nbm,
accG,
loss,
W_mean,
timeElapsed,
topGenes,
normW,
topGenes_normW,
topGenes_mean,
normW_mean,
topGenes_normW_mean,
acctest,
nbm_etas,
accG_etas,
loss_iter,
W_mean_etas,
timeElapsed_etas,
)
# ================================== Part 3 ====================================
# ===================== Exact algos ===========================
def run_FISTA_eta(
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
eta=500,
beta=0.25,
delta=1.0,
gamma=1.0,
nfold=4,
showres=False,
saveres=False,
keepfig=False,
outputPath="../results/",
):
return basic_run_eta(
FISTA_Primal,
predict_FISTA,
X,
YR,
k,
genenames,
clusternames,
niter=niter,
nfold=nfold,
beta=beta,
delta=delta,
eta=eta,
gamma=gamma,
showres=showres,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
def run_primal_dual_L1N_eta(
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=1.5,
beta=0.25,
delta=1.0,
eta=500,
nfold=4,
random_seed=1,
showres=True,
saveres=False,
keepfig=False,
outputPath="../results/",
):
return basic_run_eta(
primal_dual_L1N,
predict_L1,
X,
YR,
k,
genenames,
clusternames,
niter=niter,
beta=beta,
delta=delta,
eta=eta,
rho=rho,
tau=tau,
nfold=nfold,
rng=random_seed,
showres=showres,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
def run_primal_dual_L1N_eta_compare(
X,
YR,
k,
alglist,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=1.5,
beta=0.25,
delta=1.0,
eta=500,
nfold=4,
random_seed=1,
showres=False,
saveres=False,
keepfig=False,
outputPath="../results/",
):
return basic_run_eta_compare(
primal_dual_L1N,
predict_L1,
X,
YR,
k,
alglist,
genenames,
clusternames,
niter=niter,
beta=beta,
delta=delta,
eta=eta,
rho=rho,
tau=tau,
nfold=nfold,
rng=random_seed,
showres=showres,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
def run_primal_dual_Nuclear_eta(
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=1.5,
beta=0.25,
delta=1.0,
eta_star=500,
nfold=4,
random_seed=1,
showres=True,
saveres=False,
keepfig=False,
outputPath="../results/",
):
return basic_run_eta(
primal_dual_Nuclear,
predict_L1,
X,
YR,
k,
genenames,
clusternames,
niter=niter,
beta=beta,
delta=delta,
eta_star=eta_star,
rho=rho,
tau=tau,
nfold=nfold,
rng=random_seed,
showres=showres,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
def run_FISTA_tabeta(
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
tabeta=[100, 200, 300, 400, 500],
gamma=1.0,
nfold=4,
random_seed=1,
showres=True,
saveres=False,
keepfig=False,
outputPath="../results/",
):
return basic_run_tabeta(
FISTA_Primal,
predict_FISTA,
X,
YR,
k,
genenames,
clusternames,
niter=niter,
tabeta=tabeta,
gamma=gamma,
nfold=nfold,
rng=random_seed,
showres=showres,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
def run_primal_dual_L1N_tabeta(
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
nfold=4,
delta=1.0,
random_seed=1,
tabeta=[10, 20, 50, 75, 100, 200, 300],
showres=True,
keepfig=False,
saveres=False,
outputPath="../results/",
):
return basic_run_tabeta(
primal_dual_L1N,
predict_L1,
X,
YR,
k,
genenames=genenames,
clusternames=clusternames,
niter=niter,
tabeta=tabeta,
rho=rho,
tau=tau,
beta=beta,
nfold=nfold,
delta=delta,
rng=random_seed,
showres=showres,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
def run_primal_dual_Nuclear_tabEtastar(
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=1.5,
beta=0.25,
delta=1.0,
tabEtastar=[100, 200, 400],
gamma=1,
nfold=4,
rng=1,
showres=True,
saveres=False,
keepfig=False,
outputPath="../results/",
):
"""
It shares the same input as function basic_run_eta except that the eta is
replaced by tabeta. It has also all the output of that of the basic_run_eta
but it has its own 5 more output:
nbm_etas,accG_etas,loss_iter,W_mean_etas,timeElapsed_etas
Note : For now the funciton will save the output, show the figures and save the
results for only the last experiment, i.e. only for the last eta.
This mechanism will be changed in the future update.
"""
n_etas = len(tabEtastar)
n, d = X.shape
# W_mean_etas stores w for each eta, where w is the mean of W0 along its third axis
W_mean_etas = np.zeros((d, k, n_etas))
loss_iter = np.zeros((n_etas, niter)) # loss for each iteration of each eta
nbm_etas = np.zeros(n_etas, dtype=int)
accG_etas = np.zeros(n_etas)
timeElapsed_etas = np.zeros(n_etas)
for i, eta in enumerate(tabEtastar):
if i == (n_etas - 1):
(
mu,
nbm,
accG,
loss,
W_mean,
timeElapsed,
topGenes,
normW,
topGenes_normW,
topGenes_mean,
normW_mean,
topGenes_normW_mean,
acctest,
) = basic_run_eta(
primal_dual_Nuclear,
predict_L1,
X,
YR,
k,
genenames,
clusternames,
eta_star=eta,
niter=niter,
rho=rho,
tau=tau,
beta=beta,
delta=delta,
gamma=gamma,
nfold=nfold,
rng=rng,
showres=True,
saveres=saveres,
keepfig=keepfig,
outputPath=outputPath,
)
else:
mu, nbm, accG, loss, W_mean, timeElapsed = basic_run_eta(
primal_dual_Nuclear,
predict_L1,
X,
YR,
k,
genenames,
clusternames,
eta_star=eta,
niter=niter,
rho=rho,
tau=tau,
beta=beta,
delta=delta,
gamma=gamma,
nfold=nfold,
rng=rng,
showres=False,
saveres=False,
outputPath=outputPath,
)[0:6]
        # record the per-eta_star summaries
        nbm_etas[i] = nbm
        accG_etas[i] = accG
        loss_iter[i, :] = loss
        W_mean_etas[:, :, i] = W_mean
        timeElapsed_etas[i] = timeElapsed
accG_etas = | pd.DataFrame(accG_etas, index=tabEtastar) | pandas.DataFrame |
import os
import collections
import unittest
import pytest
import pytz
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import pvlib
from .context import capdata as pvc
data = np.arange(0, 1300, 54.167)
index = pd.date_range(start='1/1/2017', freq='H', periods=24)
df = pd.DataFrame(data=data, index=index, columns=['poa'])
# capdata = pvc.CapData('capdata')
# capdata.df = df
"""
Run all tests from project root:
'python -m tests.test_CapData'
Run individual tests:
'python -m unittest tests.test_CapData.Class.Method'
-m flag imports unittest as module rather than running as script
Run tests using pytest use the following from project root.
To run a class of tests
pytest tests/test_CapData.py::TestCapDataEmpty
To run a specific test:
pytest tests/test_CapData.py::TestCapDataEmpty::test_capdata_empty
"""
test_files = ['test1.csv', 'test2.csv', 'test3.CSV', 'test4.txt',
'pvsyst.csv', 'pvsyst_data.csv']
class TestUpdateSummary:
"""Test the update_summary wrapper and functions used within."""
def test_round_kwarg_floats(self):
"""Tests round kwarg_floats."""
kwarg_dict = {'ref_val': 763.4536140499999, 't1': 2, 'inplace': True}
rounded_kwarg_dict_3 = {'ref_val': 763.454, 't1': 2,
'inplace': True}
assert pvc.round_kwarg_floats(kwarg_dict) == rounded_kwarg_dict_3
rounded_kwarg_dict_4 = {'ref_val': 763.4536, 't1': 2,
'inplace': True}
assert pvc.round_kwarg_floats(kwarg_dict, 4) == rounded_kwarg_dict_4
def test_tstamp_kwarg_to_strings(self):
"""Tests coversion of kwarg values from timestamp to strings."""
start_datetime = pd.to_datetime('10/10/1990 00:00')
kwarg_dict = {'start': start_datetime, 't1': 2}
kwarg_dict_str_dates = {'start': '1990-10-10 00:00', 't1': 2}
assert pvc.tstamp_kwarg_to_strings(kwarg_dict) == kwarg_dict_str_dates
class TestTopLevelFuncs(unittest.TestCase):
def test_perc_wrap(self):
"""Test percent wrap function."""
rng = np.arange(1, 100, 1)
rng_cpy = rng.copy()
df = pd.DataFrame({'vals': rng})
df_cpy = df.copy()
bool_array = []
for val in rng:
np_perc = np.percentile(rng, val, interpolation='nearest')
wrap_perc = df.agg(pvc.perc_wrap(val)).values[0]
bool_array.append(np_perc == wrap_perc)
self.assertTrue(all(bool_array),
'np.percentile wrapper gives different value than np perc')
        self.assertTrue(df.equals(df_cpy), 'perc_wrap function modified input df')
def test_filter_irr(self):
rng = np.arange(0, 1000)
df = pd.DataFrame(np.array([rng, rng+100, rng+200]).T,
columns = ['weather_station irr poa W/m^2',
'col_1', 'col_2'])
df_flt = pvc.filter_irr(df, 'weather_station irr poa W/m^2', 50, 100)
self.assertEqual(df_flt.shape[0], 51,
'Incorrect number of rows returned from filter.')
self.assertEqual(df_flt.shape[1], 3,
'Incorrect number of columns returned from filter.')
self.assertEqual(df_flt.columns[0], 'weather_station irr poa W/m^2',
                         'Filter column name inadvertently modified by method.')
        self.assertEqual(df_flt.iloc[0, 0], 50,
                         'Minimum value in returned data in filter column is '
                         'not equal to low argument.')
        self.assertEqual(df_flt.iloc[-1, 0], 100,
                         'Maximum value in returned data in filter column is '
                         'not equal to high argument.')
def test_fit_model(self):
"""
Test fit model func which wraps statsmodels ols.fit for dataframe.
"""
rng = np.random.RandomState(1)
x = 50 * abs(rng.rand(50))
y = 2 * x - 5 + 5 * rng.randn(50)
df = pd.DataFrame({'x': x, 'y': y})
fml = 'y ~ x - 1'
passed_ind_vars = fml.split('~')[1].split()[::2]
try:
passed_ind_vars.remove('1')
except ValueError:
pass
reg = pvc.fit_model(df, fml=fml)
for var in passed_ind_vars:
self.assertIn(var, reg.params.index,
                          '{} ind variable in formula argument not in model '
                          'parameters'.format(var))
def test_predict(self):
x = np.arange(0, 50)
y1 = x
y2 = x * 2
y3 = x * 10
dfs = [pd.DataFrame({'x': x, 'y': y1}),
pd.DataFrame({'x': x, 'y': y2}),
pd.DataFrame({'x': x, 'y': y3})]
reg_lst = []
for df in dfs:
reg_lst.append(pvc.fit_model(df, fml='y ~ x'))
reg_ser = pd.Series(reg_lst)
for regs in [reg_lst, reg_ser]:
preds = pvc.predict(regs, pd.DataFrame({'x': [10, 10, 10]}))
self.assertAlmostEqual(preds.iloc[0], 10, 7, 'Pred for x = y wrong.')
self.assertAlmostEqual(preds.iloc[1], 20, 7, 'Pred for x = y * 2 wrong.')
self.assertAlmostEqual(preds.iloc[2], 100, 7, 'Pred for x = y * 10 wrong.')
            self.assertEqual(3, preds.shape[0], 'Each of the three input '
                                                'regressions should have a '
                                                'prediction')
def test_pred_summary(self):
"""Test aggregation of reporting conditions and predicted results."""
"""
grpby -> df of regressions
regs -> series of predicted values
df of reg parameters
"""
pvsyst = pvc.CapData('pvsyst')
pvsyst.load_data(path='./tests/data/', load_pvsyst=True)
df_regs = pvsyst.data.loc[:, ['E_Grid', 'GlobInc', 'TAmb', 'WindVel']]
df_regs_day = df_regs.query('GlobInc > 0')
grps = df_regs_day.groupby(pd.Grouper(freq='M', label='right'))
ones = np.ones(12)
irr_rc = ones * 500
temp_rc = ones * 20
w_vel = ones
rcs = pd.DataFrame({'GlobInc': irr_rc, 'TAmb': temp_rc, 'WindVel': w_vel})
results = pvc.pred_summary(grps, rcs, 0.05,
fml='E_Grid ~ GlobInc +'
'I(GlobInc * GlobInc) +'
'I(GlobInc * TAmb) +'
'I(GlobInc * WindVel) - 1')
self.assertEqual(results.shape[0], 12, 'Not all months in results.')
self.assertEqual(results.shape[1], 10, 'Not all cols in results.')
self.assertIsInstance(results.index,
pd.core.indexes.datetimes.DatetimeIndex,
'Index is not pandas DatetimeIndex')
col_length = len(results.columns.values)
col_set_length = len(set(results.columns.values))
self.assertEqual(col_set_length, col_length,
'There is a duplicate column name in the results df.')
pt_qty_exp = [341, 330, 392, 390, 403, 406,
456, 386, 390, 346, 331, 341]
gaur_cap_exp = [3089550.4039329495, 3103610.4635679387,
3107035.251399103, 3090681.1145782764,
3058186.270209293, 3059784.2309170915,
3088294.50827525, 3087081.0026879036,
3075251.990424683, 3093287.331878834,
3097089.7852036236, 3084318.093294242]
for i, mnth in enumerate(results.index):
self.assertLess(results.loc[mnth, 'guaranteedCap'],
results.loc[mnth, 'PredCap'],
                            'Guaranteed capacity is greater than predicted in '
                            'month {}'.format(mnth))
            self.assertGreater(results.loc[mnth, 'guaranteedCap'], 0,
                               'Guaranteed capacity is less than 0 in '
                               'month {}'.format(mnth))
            self.assertAlmostEqual(results.loc[mnth, 'guaranteedCap'],
                                   gaur_cap_exp[i], 7,
                                   'Guaranteed capacity not equal to expected '
                                   'value in {}'.format(mnth))
            self.assertEqual(results.loc[mnth, 'pt_qty'], pt_qty_exp[i],
                             'Point quantity not equal to expected values in '
                             '{}'.format(mnth))
def test_perc_bounds_perc(self):
bounds = pvc.perc_bounds(20)
self.assertEqual(bounds[0], 0.8,
'{} for 20 perc is not 0.8'.format(bounds[0]))
self.assertEqual(bounds[1], 1.2,
'{} for 20 perc is not 1.2'.format(bounds[1]))
def test_perc_bounds_tuple(self):
bounds = pvc.perc_bounds((15, 40))
self.assertEqual(bounds[0], 0.85,
'{} for 15 perc is not 0.85'.format(bounds[0]))
self.assertEqual(bounds[1], 1.4,
'{} for 40 perc is not 1.4'.format(bounds[1]))
def test_filter_grps(self):
pvsyst = pvc.CapData('pvsyst')
pvsyst.load_data(path='./tests/data/',
fname='pvsyst_example_HourlyRes_2.CSV',
load_pvsyst=True)
pvsyst.set_regression_cols(power='real_pwr--', poa='irr-poa-',
t_amb='temp-amb-', w_vel='wind--')
pvsyst.filter_irr(200, 800)
pvsyst.rep_cond(freq='MS')
grps = pvsyst.data_filtered.groupby(pd.Grouper(freq='MS', label='left'))
poa_col = pvsyst.column_groups[pvsyst.regression_cols['poa']][0]
grps_flt = pvc.filter_grps(grps, pvsyst.rc, poa_col, 0.8, 1.2)
self.assertIsInstance(grps_flt,
pd.core.groupby.generic.DataFrameGroupBy,
'Returned object is not a dataframe groupby.')
self.assertEqual(grps.ngroups, grps_flt.ngroups,
                         'Returned groupby does not have the same number of '
                         'groups as passed groupby.')
cnts_before_flt = grps.count()[poa_col]
cnts_after_flt = grps_flt.count()[poa_col]
less_than = all(cnts_after_flt < cnts_before_flt)
self.assertTrue(less_than, 'Points were not removed for each group.')
def test_perc_difference(self):
result = pvc.perc_difference(9, 10)
self.assertAlmostEqual(result, 0.105263158)
result = pvc.perc_difference(10, 9)
self.assertAlmostEqual(result, 0.105263158)
result = pvc.perc_difference(10, 10)
self.assertAlmostEqual(result, 0)
result = pvc.perc_difference(0, 0)
self.assertAlmostEqual(result, 0)
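    # The expected values above are consistent with the symmetric percent
    # difference |a - b| / ((a + b) / 2); e.g. |9 - 10| / 9.5 ~= 0.105263158.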
def test_check_all_perc_diff_comb(self):
ser = | pd.Series([10.1, 10.2]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 7 10:38:13 2022
@author: aoust
"""
import numpy as np
import pandas
import matplotlib.pyplot as plt
import scipy.stats
d_3_12 = pandas.read_csv("../output/output_heur3_1.200000.csv",sep = ";",header=2)
d_3_12["Instance name "] = d_3_12["Instance name "] + "_3channels"
d_6_12 = pandas.read_csv("../output/output_heur6_1.200000.csv",sep = ";",header=2)
d_6_12["Instance name "] = d_6_12["Instance name "] + "_6channels"
d12 = pandas.concat([d_3_12,d_6_12])
d_3_15 = pandas.read_csv("../output/output_heur3_1.500000.csv",sep = ";",header=2)
d_3_15["Instance name "] = d_3_15["Instance name "] + "_3channels"
d_6_15 = pandas.read_csv("../output/output_heur6_1.500000.csv",sep = ";",header=2)
d_6_15["Instance name "] = d_6_15["Instance name "] + "_6channels"
d15 = pandas.concat([d_3_15,d_6_15])
d_3_2 = pandas.read_csv("../output/output_heur3_2.000000.csv",sep = ";",header=2)
d_3_2["Instance name "] = d_3_2["Instance name "] + "_3channels"
d_6_2 = pandas.read_csv("../output/output_heur6_2.000000.csv",sep = ";",header=2)
d_6_2["Instance name "] = d_6_2["Instance name "] + "_6channels"
d2 = pandas.concat([d_3_2,d_6_2])
d_3_3 = pandas.read_csv("../output/output_heur3_3.000000.csv",sep = ";",header=2)
d_3_3["Instance name "] = d_3_3["Instance name "] + "_3channels"
d_6_3 = pandas.read_csv("../output/output_heur6_3.000000.csv",sep = ";",header=2)
d_6_3["Instance name "] = d_6_3["Instance name "] + "_6channels"
d3 = pandas.concat([d_3_3,d_6_3])
d_3_4 = pandas.read_csv("../output/output_heur3_4.000000.csv",sep = ";",header=2)
d_3_4["Instance name "] = d_3_4["Instance name "] + "_3channels"
d_6_4 = pandas.read_csv("../output/output_heur6_4.000000.csv",sep = ";",header=2)
d_6_4["Instance name "] = d_6_4["Instance name "] + "_6channels"
d4 = | pandas.concat([d_3_4,d_6_4]) | pandas.concat |
import pandas as pd
import os
base_dir = os.path.abspath(os.path.dirname(__file__))
df = pd.read_csv(os.path.join(base_dir, 'Hotel_Reviews.csv'))
df = | pd.concat([df['Negative_Review'], df['Positive_Review']], ignore_index=True) | pandas.concat |
import pandas as pd
class Feature(object):
def __init__(self, name, variable_type, transform):
self.name = name
self.variable_type = variable_type
self.transform = transform
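# Illustrative (hypothetical) example -- the 'close' column and the 10-period
# window are assumptions about the market data, not part of this module:
#
#   sma_10 = Feature(
#       name='sma_10',
#       variable_type='numeric',
#       transform=lambda data: data['close'].rolling(10).mean(),
#   )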
def apply_features_to_markets(features, markets_data):
def apply_features_to_market(features, market):
# get the new data series by computing the features.
features_series = list(map(lambda f: f.transform(market['data']), features))
feature_names = [f.name for f in features]
features_df = | pd.concat(features_series, axis=1, keys=feature_names) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import kmertools as kt
transitions_path = '/Users/simonelongo/Documents/QuinlanLabFiles/kmer_data/results/kp_21oct2019/bp_counts_per3mer.csv'
counts_path = '/Users/simonelongo/Documents/QuinlanLabFiles/kmer_data/data/ref_genome_kmer_freq.csv'
counts = | pd.read_csv(counts_path, index_col=0) | pandas.read_csv |
import pytest
from cellrank.tl._colors import _map_names_and_colors, _create_categorical_colors
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from matplotlib.colors import is_color_like
class TestColors:
def test_create_categorical_colors_too_many_colors(self):
with pytest.raises(ValueError):
_create_categorical_colors(1000)
def test_create_categorical_colors_no_categories(self):
c = _create_categorical_colors(0)
assert c == []
def test_create_categorical_colors_neg_categories(self):
with pytest.raises(RuntimeError):
_create_categorical_colors(-1)
def test_create_categorical_colors_normal_run(self):
colors = _create_categorical_colors(62)
assert len(colors) == 62
assert all(map(lambda c: isinstance(c, str), colors))
assert all(map(lambda c: is_color_like(c), colors))
class TestMappingColors:
def test_mapping_colors_not_categorical(self):
query = pd.Series(["foo", "bar", "baz"], dtype="str")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(TypeError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_size(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_different_index(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category", index=[2, 3, 4])
reference = pd.Series(["foo", "bar", "baz"], dtype="category", index=[1, 2, 3])
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(
reference, query, colors_reference=["red", "green", "foo"]
)
def test_mapping_colors_too_few_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query, colors_reference=["red", "green"])
def test_mapping_colors_simple_1(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"]).astype("category")
expected = pd.Series(["a_1", "a_2", "b"])
expected_index = pd.Index(["a", "b", "d"])
res = _map_names_and_colors(x, y)
assert isinstance(res, pd.Series)
np.testing.assert_array_equal(res.values, expected.values)
np.testing.assert_array_equal(res.index.values, expected_index.values)
def test_mapping_colors_simple_2(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res = _map_names_and_colors(reference, query)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
def test_mapping_colors_simple_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "green", "blue"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_too_many_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "green", "blue", "black"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_different_color_representation(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=[(1, 0, 0), "green", (0, 0, 1, 0)]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_non_unique_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#ff0000", "#ff0000"]
def test_mapping_colors_same_reference(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "foo", "foo"], dtype="category")
r, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert list(r.index) == ["bar", "baz", "foo"]
assert list(r.values) == ["foo_1", "foo_2", "foo_3"]
assert c == ["#b20000", "#d13200", "#f07300"]
def test_mapping_colors_diff_query_reference(self):
query = pd.Series(["bar", "bar", "bar"], dtype="category")
reference = pd.Series(["foo", "foo", "foo"], dtype="category")
r, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert list(r.index) == ["bar"]
assert list(r.values) == ["foo"]
assert c == ["#ff0000"]
def test_mapping_colors_empty(self):
query = pd.Series([], dtype="category")
reference = pd.Series([], dtype="category")
r = _map_names_and_colors(reference, query)
assert isinstance(r, pd.Series)
assert is_categorical_dtype(r)
def test_mapping_colors_empty_with_color(self):
query = pd.Series([], dtype="category")
reference = pd.Series([], dtype="category")
r, c = _map_names_and_colors(reference, query, colors_reference=[])
assert isinstance(r, pd.Series)
assert is_categorical_dtype(r)
assert isinstance(c, list)
assert len(c) == 0
def test_mapping_colors_negative_en_cutoff(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query, en_cutoff=-1)
def test_mapping_colors_0_en_cutoff(self):
query = | pd.Series(["bar", "bar", "bar"], dtype="category") | pandas.Series |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import FilePathOrBuffer, StorageOptions, Union
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, generic
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import IOHandles, get_handle, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision pandas converter, and
'round_trip' for the round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
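# Illustrative behaviour of validate_integer (internal helper):
#
#   validate_integer("nrows", 3.0)               # -> 3 (float safely cast)
#   validate_integer("nrows", None)              # -> None (passed through)
#   validate_integer("nrows", 3.5)               # raises ValueError
#   validate_integer("chunksize", 0, min_val=1)  # raises ValueError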
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
if not (
is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1)
nrows = kwds.get("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"lineterminator": None,
"header": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_format": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_map": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
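# Worked example of the widths -> colspecs conversion above: widths=[3, 5, 2]
# yields colspecs=[(0, 3), (3, 8), (8, 10)]. A hypothetical call using it
# ("fixed_width.txt" is a placeholder file):
#
#   read_fwf("fixed_width.txt", widths=[3, 5, 2], names=["a", "b", "c"])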
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._get_options_with_defaults(engine)
options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
# pandas\io\parsers.py:907: error: Incompatible types in assignment
# (expression has type "object", variable has type "Union[int, str,
# None]") [assignment]
for argname, default in _fwf_defaults.items(): # type: ignore[assignment]
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.copy()
fallback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.getfilesystemencoding() or "utf-8"
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.get(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mapping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mapping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
)
# error: Too many arguments for "ParserBase"
return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
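# Usage sketch for the iterator/chunksize path documented above ("large.csv"
# and process() are placeholders). TextFileReader is a context manager as of
# pandas 1.2, so chunked reads can be written as:
#
#   with read_csv("large.csv", chunksize=100_000) as reader:
#       for chunk in reader:       # each chunk is a DataFrame
#           process(chunk)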
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or isinstance(index_col, bool):
index_col = []
return (
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
)
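# Illustrative behaviour of _is_potential_multi_index (internal helper):
#
#   _is_potential_multi_index(["a", "b"])                 # -> False
#   _is_potential_multi_index([("a", "x"), ("b", "y")])   # -> True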
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
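# Illustrative behaviour of _evaluate_usecols (internal helper):
#
#   _evaluate_usecols(lambda c: c.startswith("a"), ["apple", "beta", "avocado"])
#   # -> {0, 2}
#   _evaluate_usecols([0, 2], ["apple", "beta", "avocado"])   # -> [0, 2]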
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
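# Illustrative behaviour of _validate_usecols_names (internal helper):
#
#   _validate_usecols_names(["a", "b"], ["a", "b", "c"])   # -> ["a", "b"]
#   _validate_usecols_names(["a", "z"], ["a", "b", "c"])   # raises ValueError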
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
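# Illustrative behaviour of _validate_skipfooter_arg (internal helper):
#
#   _validate_skipfooter_arg(2)     # -> 2
#   _validate_skipfooter_arg(-1)    # raises ValueError (negative)
#   _validate_skipfooter_arg(2.0)   # raises ValueError (not an integer)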
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
        'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
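# Illustrative behaviour of _validate_usecols_arg (internal helper):
#
#   _validate_usecols_arg(["b", "a"])          # -> ({"a", "b"}, "string")
#   _validate_usecols_arg([0, 1])              # -> ({0, 1}, "integer")
#   _validate_usecols_arg(lambda c: c != "x")  # -> (<callable>, None)
#   _validate_usecols_arg(None)                # -> (None, None)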
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
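# Sketch of accepted and rejected shapes (illustrative only):
# >>> _validate_parse_dates_arg(["date_col"])   # lists are returned unchanged
# ['date_col']
# >>> _validate_parse_dates_arg("date_col")     # non-boolean scalar
# TypeError: Only booleans, lists, and dictionaries are accepted ...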
class ParserBase:
def __init__(self, kwds):
self.names = kwds.get("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.get("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.get("na_values")
self.na_fvalues = kwds.get("na_fvalues")
self.na_filter = kwds.get("na_filter", False)
self.keep_default_na = kwds.get("keep_default_na", True)
self.true_values = kwds.get("true_values")
self.false_values = kwds.get("false_values")
self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
self.infer_datetime_format = kwds.pop("infer_datetime_format", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format,
cache_dates=self.cache_dates,
)
# validate header options for mi
self.header = kwds.get("header")
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if any(i < 0 for i in self.header):
raise ValueError(
"cannot specify multi-index header with negative integers"
)
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
)
if kwds.get("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
elif self.header is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header is not None"
)
# GH 16338
elif not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
# GH 27779
elif self.header < 0:
raise ValueError(
"Passing negative integer to header is invalid. "
"For no header, use header=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
Let the readers open IOHandles after they are done with their potential raises.
"""
self.handles = get_handle(
src,
"r",
encoding=kwds.get("encoding", None),
compression=kwds.get("compression", None),
memory_map=kwds.get("memory_map", False),
storage_options=kwds.get("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the dataframe.
Raises
------
ValueError
If column to parse_date is not in dataframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# get only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if isinstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return isinstance(self.parse_dates, dict) or (
isinstance(self.parse_dates, list)
and len(self.parse_dates) > 0
and isinstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers
"""
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header)))
names = ic + columns
# If we find unnamed columns all in a single
# level, then our header was too long.
for n in range(len(columns[0])):
if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header = ",".join(str(x) for x in self.header)
raise ParserError(
f"Passed header=[{header}] are too many rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header
]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
# pandas\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
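# Rough illustration of the dedup scheme above when mangle_dupe_cols is True
# (a sketch; 'parser' stands for any ParserBase instance):
# >>> parser._maybe_dedup_names(["a", "a", "b", "a"])
# ['a', 'a.1', 'b', 'a.2']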
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
# pandas\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if isinstance(self.na_values, dict):
# pandas\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool=False
)
else:
is_ea = is_extension_array_dtype(cast_type)
is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
# skip inference if specified dtype is object
# or casting to an EA
try_num_bool = not (cast_type and is_str_or_ea_dtype)
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool
)
# type specified in dtype param or cast_type is an EA
if cast_type and (
not is_dtype_equal(cvals, cast_type)
or is_extension_array_dtype(cast_type)
):
if not is_ea and na_count > 0:
try:
if is_bool_dtype(cast_type):
raise ValueError(
f"Bool column has NA values in column {c}"
)
except (AttributeError, TypeError):
# invalid input to is_bool_dtype
pass
cvals = self._cast_types(cvals, cast_type, c)
result[c] = cvals
if verbose and na_count:
print(f"Filled {na_count} NA values in column {c!s}")
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default True
try to cast values to numeric (first preference) or boolean
Returns
-------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool and is_object_dtype(values.dtype):
# exclude e.g DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except (ValueError, TypeError):
# e.g. encountering datetime string gets ValueError
# TypeError can be raised in floatify
result = values
na_count = parsers.sanitize_objects(result, na_values, False)
else:
na_count = isna(result).sum()
else:
result = values
if values.dtype == np.object_:
na_count = | parsers.sanitize_objects(values, na_values, False) | pandas._libs.parsers.sanitize_objects |
from adjustText import adjust_text
import copy
import csv
import matplotlib
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import pickle
import scipy.cluster.hierarchy as SciPyClus
import scipy.stats as scs
from singscore.singscore import *
import sys
class PathDir:
dirCurrent = os.path.dirname(sys.argv[0])
dirBaseGit = os.path.dirname(os.path.normpath(dirCurrent))
pathOutFolder = dirBaseGit
for strFolder in ['figures']:
pathOutFolder = os.path.join(pathOutFolder, strFolder)
pathProcRNAData = dirBaseGit
for strFolder in ['preproc', 'rnaseq']:
pathProcRNAData = os.path.join(pathProcRNAData, strFolder)
pathRefData = dirBaseGit
for strFolder in ['preproc', 'ref']:
pathRefData = os.path.join(pathRefData, strFolder)
pathPublicData = dirBaseGit
for strFolder in ['preproc', 'public_data']:
pathPublicData = os.path.join(pathPublicData, strFolder)
class Process:
listLinesForDisp = ['MDA-MB-231',
'SUM159']
listLines = [strLine.replace('-','') for strLine in listLinesForDisp]
listDiffExprFiles = [
f'voom-limma_{strLine}_GAll-EVC_diffExpr.csv' for strLine in listLines]
listOfListsConds = [['NTC', 'NTC', 'NTC',
'EVC', 'EVC', 'EVC',
'g4', 'g4', 'g4',
'gAll', 'gAll', 'gAll'],
['NTC', 'NTC', 'NTC',
'EVC', 'EVC', 'EVC',
'g4', 'g4', 'g4',
'gAll', 'gAll', 'gAll']]
def quant_data(flagResult=False):
strQuantFile = 'Waryah_Oct2017_ZEB1-epiCRISPR_QuantGeneLevel_lengthScaledTPM.csv'
dfData = pd.read_table(os.path.join(PathDir.pathProcRNAData, strQuantFile),
sep=',', header=0, index_col=0)
return dfData
def diff_expr_data(flagResult=False):
listDFToMerge = []
for iFile in range(len(Process.listDiffExprFiles)):
strFileName = Process.listDiffExprFiles[iFile]
# strCond = strFileName.split('.csv')[0]
strCellLine = strFileName.split('_GAll-EVC_diffExpr.csv')[0].split('voom-limma_')[1]
dfIn = pd.read_csv(os.path.join(PathDir.pathProcRNAData, strFileName),
sep=',', header=0, index_col=0)
if iFile == 0:
dfIn.drop(labels=['t', 'P.Value'],
axis=1,
inplace=True)
else:
dfIn.drop(labels=['AveExpr', 't', 'P.Value'],
axis=1,
inplace=True)
arrayHasNullStats = dfIn['adj.P.Val'].isnull().astype(bool)
arrayHasNullDiffExpr = dfIn['logFC'].isnull().astype(bool)
arrayAdjPVals = dfIn['adj.P.Val'].values.astype(float)
arrayLogFC = dfIn['logFC'].values.astype(float)
arrayAdjPVals[np.where(arrayHasNullStats)[0]] = 1.0
arrayLogFC[np.where(arrayHasNullDiffExpr)[0]] = 0.0
dfIn['adj.P.Val'] = pd.Series(arrayAdjPVals, index=dfIn.index.tolist())
dfIn['logFC'] = pd.Series(arrayLogFC, index=dfIn.index.tolist())
listColumns = dfIn.columns.tolist()
dictColToRename = {}
for strCol in listColumns:
if np.bitwise_or(strCol == 'external_gene_name', strCol == 'AveExpr'):
dictColToRename[strCol] = strCol
else:
dictColToRename[strCol] = strCellLine + ':' + strCol
dfIn.rename(columns=dictColToRename,
inplace=True)
listDFToMerge.append(dfIn)
dfMerged = pd.concat(listDFToMerge, axis=1, sort=True)
return dfMerged
def tcga_scores(flagResult=False,
dfIn=pd.DataFrame(),
flagPerformExtraction=False):
strTempFileName = 'TCGA-BRCA-EpiMesScores.tsv'
pathOut = os.path.join(PathDir.pathOutFolder, 'figure_5')
if not os.path.exists(os.path.join(pathOut, strTempFileName)):
flagPerformExtraction = True
if flagPerformExtraction:
listTCGAGenes = dfIn.index.tolist()
listTCGASamples = dfIn.columns.tolist()
numSamples = len(listTCGASamples)
dictEpiMesCellLine = Process.tan2012_tissue_genes()
listEpiTissueGenes = dictEpiMesCellLine['epi_genes']
listMesTissueGenes = dictEpiMesCellLine['mes_genes']
# create lists of the cell line/tissue epithelial/mesenchymal gene lists for scoring
listOutputEpiTissueGenesMatched = [strGene for strGene in listTCGAGenes
if strGene.split('|')[0] in listEpiTissueGenes]
listOutputMesTissueGenesMatched = [strGene for strGene in listTCGAGenes
if strGene.split('|')[0] in listMesTissueGenes]
dfScoresOut = pd.DataFrame(
{'Epithelial Score':np.zeros(numSamples, dtype=float),
'Mesenchymal Score':np.zeros(numSamples, dtype=float)},
index=listTCGASamples)
for iSample in range(numSamples):
print('Patient ' + '{}'.format(iSample))
strSample = listTCGASamples[iSample]
dfScore = score(up_gene=listOutputEpiTissueGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Epithelial Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScore = score(up_gene=listOutputMesTissueGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Mesenchymal Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScoresOut.to_csv(os.path.join(pathOut, strTempFileName),
sep='\t')
else:
dfScoresOut = pd.read_table(os.path.join(pathOut, strTempFileName),
sep='\t', index_col=0, header=0)
return dfScoresOut
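# Hypothetical usage sketch (not in the original script): these Process helpers
# take no `self`, so they can be called through the class, e.g.
# dfBrCaRNA = Process.tcga_brca()
# dfScores = Process.tcga_scores(dfIn=dfBrCaRNA, flagPerformExtraction=False)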
def tcga_brca(flagResult=False,
flagPerformExtraction=False):
strPanCanRNASeqFile = 'EBPlusPlusAdjustPANCAN_IlluminaHiSeq_RNASeqV2.geneExp.tsv'
strTempFileName = 'TCGA_BrCa_PreProc_RNA.pickle'
if not os.path.exists(os.path.join(PathDir.pathPublicData, strTempFileName)):
flagPerformExtraction = True
if flagPerformExtraction:
# extract the TCGA pan-cancer patient metadata
dfMeta = pd.read_excel(
os.path.join(PathDir.pathPublicData, 'TCGA-CDR-SupplementalTableS1.xlsx'),
header=0, index_col=0, sheet_name='TCGA-CDR')
dfMeta.set_index('bcr_patient_barcode', inplace=True)
# identify patients which are flagged as the breast cancer cohort
listBRCAPatients = dfMeta[dfMeta['type']=='BRCA'].index.tolist()
dfTCGAPanCanSamples = pd.read_table(
os.path.join(PathDir.pathPublicData, strPanCanRNASeqFile),
sep='\t', header=None, index_col=None, nrows=1)
listTCGAPanCanColumns = dfTCGAPanCanSamples.iloc[0,:].tolist()
listTCGAPanCanSamples = listTCGAPanCanColumns[1:]
# extract primary tumour (index 01) samples from the full sample list
listBRCASamples = [strSample for strSample in listTCGAPanCanSamples
if np.bitwise_and(strSample[0:len('TCGA-NN-NNNN')] in listBRCAPatients,
strSample[13:15]=='01')]
# # # # # # # # # # # # # # # # # # # # #
# extract the TCGA pan-cancer RNA-seq data
#take this subset
dfTCGABrCa = pd.read_table(
os.path.join(PathDir.pathPublicData, strPanCanRNASeqFile),
sep='\t', header=0, index_col=0,
usecols=[listTCGAPanCanColumns[0]]+listBRCASamples)
dfTCGABrCa.to_pickle(os.path.join(PathDir.pathPublicData, strTempFileName))
else:
dfTCGABrCa = pd.read_pickle(os.path.join(PathDir.pathPublicData, strTempFileName))
return dfTCGABrCa
def ccle_brca(flagResult=False,
flagPerformExtraction=False):
strTempFile = 'CCLE_BRCA_RNA_Abund.tsv'
if not os.path.exists(os.path.join(PathDir.pathPublicData, strTempFile)):
flagPerformExtraction = True
if flagPerformExtraction:
#https://ndownloader.figshare.com/files/35020903
dfMetaData = pd.read_table(os.path.join(PathDir.pathPublicData, 'sample_info.csv'),
sep=',', index_col=0, header=0)
listBRCALinesACH = dfMetaData[dfMetaData['primary_disease'] == 'Breast Cancer'].index.tolist()
dictACHToCCLE = dict(zip(listBRCALinesACH,
dfMetaData['CCLE_Name'].reindex(listBRCALinesACH).values.tolist()))
#https://ndownloader.figshare.com/files/34989919
dfCCLE = pd.read_table(os.path.join(PathDir.pathPublicData, 'CCLE_expression.csv'),
sep=',', index_col=0, header=0)
dfBrCa = dfCCLE.reindex(listBRCALinesACH).copy(deep=True)
dfBrCa.rename(
index=dict(zip(listBRCALinesACH,[dictACHToCCLE[strLine] for strLine in listBRCALinesACH])),
inplace=True)
dfBrCa.to_csv(os.path.join(PathDir.pathPublicData, strTempFile),
sep='\t')
else:
dfBrCa = pd.read_table(os.path.join(PathDir.pathPublicData, strTempFile),
sep='\t', index_col=0)
return dfBrCa
def ccle_scores(flagResult=False,
flagPerformExtraction=False,
dfIn= | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
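# Observed rule of thumb from the cases above (an observation, not a spec):
# to_offset(Timedelta(...)) collapses the delta to its largest exact unit, e.g.
# frequencies.to_offset(Timedelta(minutes=90)) == offsets.Minute(90)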
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
#All of these dates are on same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
def test_non_datetimeindex(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [ tm.makeStringIndex(10),
tm.makeUnicodeIndex(10) ]:
self.assertRaises(ValueError, lambda : frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04']))
self.assertEqual(result,expected)
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [ Series(np.arange(10)),
Series(np.arange(10.))]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# a non-convertible string
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
for freq in ['Y']:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101',periods=10,freq=freq))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,freq)
s = Series(date_range('20130101','20130110'))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
def test_legacy_offset_warnings(self):
for k, v in compat.iteritems(frequencies._rule_aliases):
with tm.assert_produces_warning(FutureWarning):
result = frequencies.get_offset(k)
exp = frequencies.get_offset(v)
self.assertEqual(result, exp)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
idx = date_range('2011-01-01', periods=5, freq=k)
exp = date_range('2011-01-01', periods=5, freq=v)
self.assert_index_equal(idx, exp)
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
def test_is_superperiod_subperiod():
assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))
assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli()))
assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second()))
assert(frequencies.is_superperiod(offsets.Milli(), | offsets.Micro() | pandas.tseries.offsets.Micro |
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
arrays = [ | pd.array([1, 2, 3, None], dtype=dtype) | pandas.array |
# -*- coding: utf-8 -*-
"""carpriceprediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1yHxqpV_thjXvv6gs4Ya5ZY_KF6PDiYyx
"""
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd
data = | pd.read_csv('/car data.csv') | pandas.read_csv |
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy as sc
import pickle
import os
from . import preprocess
from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from . import builders
class Dataset(object):
@staticmethod
def load():
train = pd.read_csv('data/train_final.csv', delimiter='\t')
playlists = pd.read_csv('data/playlists_final.csv', delimiter='\t')
target_playlists = pd.read_csv('data/target_playlists.csv', delimiter='\t')
target_tracks = pd.read_csv('data/target_tracks.csv', delimiter = '\t')
tracks = pd.read_csv('data/tracks_final.csv', delimiter='\t')
return Dataset(train, tracks, playlists, target_tracks, target_playlists)
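# Hypothetical usage sketch (file paths are the ones hard-coded in load()):
# ds = Dataset.load()
# print(ds.train.shape, ds.tracks.shape, len(ds.target_playlists))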
def __init__(self, train, tracks, playlists, target_tracks, target_playlists):
self.train = train
self.tracks = tracks
self.playlists = playlists
self.target_tracks = target_tracks
self.target_playlists = target_playlists
def _normalize_train_dataset(self):
self.track_to_num = pd.Series(self.tracks.index)
self.track_to_num.index = self.tracks['track_id_tmp']
self.playlist_to_num = | pd.Series(self.playlists.index) | pandas.Series |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
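# For context (plain pandas behaviour, not specific to bach): pd.cut over
# range(100) with bins=10 yields right-closed intervals whose first left edge
# is padded slightly below the minimum, e.g. (-0.099, 9.9], (9.9, 19.8], ...
# which is what compare_boundaries tolerates via its almost-equal checks.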
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
pd.Interval(39.6, 49.5, closed='right'),
pd.Interval(49.5, 59.4, closed='right'),
pd.Interval(59.4, 69.3, closed='right'),
pd.Interval(69.3, 79.2, closed='right'),
pd.Interval(79.2, 89.1, closed='right'),
pd.Interval(89.1, 99, closed='right'),
]
expected = pd.Series({num: ranges[int(num / 10)] for num in range(100)})
result = CutOperation(series=series, bins=10, method='bach')().sort_index()
compare_boundaries(expected, result)
ranges_wo_right = [
pd.Interval(0, 9.9, closed='left'),
pd.Interval(9.9, 19.8, closed='left'),
pd.Interval(19.8, 29.7, closed='left'),
pd.Interval(29.7, 39.6, closed='left'),
pd.Interval(39.6, 49.5, closed='left'),
pd.Interval(49.5, 59.4, closed='left'),
pd.Interval(59.4, 69.3, closed='left'),
| pd.Interval(69.3, 79.2, closed='left') | pandas.Interval |
"""This script is designed to perform statistics of demographic information
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr,spearmanr,kendalltau
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from eslearn.utils.lc_read_write_mat import read_mat, write_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
headmotion_file = r'D:\WorkStation_2018\SZ_classification\Scale\头动参数_1322.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.groupby('诊断')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.groupby('诊断')['年龄'].describe()
describe_duration_550 = scale_selected_550.groupby('诊断')['病程月'].describe()
describe_durgnaive_550 = scale_selected_550.groupby('诊断')['用药'].value_counts()
describe_sex_550 = scale_selected_550.groupby('诊断')['性别'].value_counts()
# Demographic
demographic_info_dataset1 = scale_selected_550[['folder', '诊断', '年龄', '性别', '病程月']]
headmotion = pd.read_excel(headmotion_file)
headmotion = headmotion[['Subject ID','mean FD_Power']]
demographic_info_dataset1 = pd.merge(demographic_info_dataset1, headmotion, left_on='folder', right_on='Subject ID', how='inner')
demographic_info_dataset1 = demographic_info_dataset1.drop(columns=['Subject ID'])
site_dataset1 = pd.DataFrame(np.zeros([len(demographic_info_dataset1),1]))
site_dataset1.columns = ['site']
demographic_dataset1_all = pd.concat([demographic_info_dataset1 , site_dataset1], axis=1)
demographic_dataset1_all.columns = ['ID','Diagnosis', 'Age', 'Sex', 'Duration', 'MeanFD', 'Site']
demographic_dataset1 = demographic_dataset1_all[['ID','Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']]
demographic_dataset1['Diagnosis'] = np.int32(demographic_dataset1['Diagnosis'] == 3)
# Duration and age
demographic_duration_dataset1 = demographic_dataset1_all[['Duration', 'Age']].dropna()
np.corrcoef(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
pearsonr(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
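# pearsonr returns an (r, p_value) tuple; a minimal sketch of keeping both:
# r_val, p_val = pearsonr(demographic_duration_dataset1['Duration'],
#                         demographic_duration_dataset1['Age'])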
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
headmotion_file_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\parameters\FD_power'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = pd.read_csv(scale_path_206)
scale_data_206 = scale_data_206.drop(np.array(scale_data_206.index)[scale_data_206['ID'].isin(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
Pscore = pd.DataFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype=np.float64)
Pscore = np.sum(Pscore, axis=1).describe()
Nscore = pd.DataFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64)
Nscore = np.sum(Nscore, axis=1).describe()
Gscore = pd.DataFrame(scale_data_206[['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16']].iloc[:106,:])
Gscore = np.array(Gscore)
for i, itemi in enumerate(Gscore):
for j, itemj in enumerate(itemi):
print(itemj)
if itemj.strip() != '':
Gscore[i,j] = np.float64(itemj)
else:
Gscore[i, j] = np.nan
Gscore = pd.DataFrame(Gscore)
Gscore = np.sum(Gscore, axis=1).describe()
describe_panasstotol_206 = scale_data_206.groupby('group')['PANSStotal1'].describe()
describe_age_206 = scale_data_206.groupby('group')['age'].describe()
scale_data_206['duration'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['duration'].values])
describe_duration_206 = scale_data_206.groupby('group')['duration'].describe()
describe_sex_206 = scale_data_206.groupby('group')['sex'].value_counts()
# Demographic
uid = pd.DataFrame(scale_data_206['ID'])
uid['ID'] = uid['ID'].str.replace('NC','10');
uid['ID'] = uid['ID'].str.replace('SZ','20');
uid = pd.DataFrame(uid, dtype=np.int32)
demographic_info_dataset2 = scale_data_206[['group','age', 'sex']]
demographic_info_dataset2 = pd.concat([uid, demographic_info_dataset2], axis=1)
headmotion_name_dataset2 = os.listdir(headmotion_file_206)
headmotion_file_path_dataset2 = [os.path.join(headmotion_file_206, name) for name in headmotion_name_dataset2]
meanfd = []
for i, file in enumerate(headmotion_file_path_dataset2):
fd = np.loadtxt(file)
meanfd.append(np.mean(fd))
meanfd_dataset2 = pd.DataFrame(meanfd)
headmotion_name_dataset2 = | pd.Series(headmotion_name_dataset2) | pandas.Series |
import requests
from collections import Counter
import pandas as pd
def get_data(study_id):
response = requests.get(
"https://ega-archive.org/metadata/v2/samples?queryBy=study&queryId={}&limit=0".format(study_id)
)
if response.status_code != 200:
print("Study {} got error code {}".format(study_id, response.status_code))
return []
return response.json()['response']["result"]
def get_study_info(study_id):
data = get_data(study_id)
counts = Counter(list(map(lambda x: x['gender'], data)))
if len(data) > 0:
year = data[0]['creationTime'][:4]
else:
year = "-1"
#Transform it to a dict
row = dict(counts)
row['date'] = year
row['identifier'] = study_id
row['total'] = len(data)
row['database'] = 'EGA'
return row
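# Illustrative shape of one returned row (field values invented for clarity):
# {'female': 52, 'male': 40, 'date': '2015', 'identifier': 'EGAS00001000001',
#  'total': 92, 'database': 'EGA'}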
def get_study_list(filename):
with open(filename) as fi:
study_list = list(fi.readlines())
study_list = list(map(lambda x: x.strip(), study_list))
return study_list
def main(load_file=None, skip_first=0, skip_numbers = []):
"""
Reads Studies from EGA and counts the `gender` values for the data for each
study.
It outputs a final CSV summarizing the counts of `gender` for each study.
It also produces a lot of backup files in case there is a problem do not start again.
params:
-------
load_file: name of the last backup file to load.
skip_first: how many studies it will skip.
skip_numbers: list of the number of the study that will not collect data from.
"""
study_list = get_study_list("../EGA_studies_list.txt")
rows = []
counter = 0
if load_file != None:
rows = pd.read_csv(load_file,sep=';').to_dict('records')
for s in study_list:
counter+=1
# skip the first n studies
if counter < skip_first:
continue
# Skip the specified numbers
if counter in skip_numbers:
continue
print("getting study {} - {}".format(counter, s))
rows.append(get_study_info(s))
if counter%10 == 0:
| pd.DataFrame(rows) | pandas.DataFrame |
import json
from django.http import HttpResponse
from .models import (
Invoice,
Seller,
Receiver,
)
from .serializers import (
InvoiceSerializer,
SellerSerializer,
ReceiverSerializer,
)
import re
from django.views import View
from django.http import Http404
import pandas as pd
import datetime as dt
def get_object_invoice(pk):
try:
return Invoice.objects.get(pk=pk)
except Invoice.DoesNotExist:
raise Http404
def get_object_seller(pk):
try:
return Seller.objects.get(pk=pk)
except Seller.DoesNotExist:
raise Http404
def get_object_receiver(pk):
try:
return Receiver.objects.get(pk=pk)
except Receiver.DoesNotExist:
raise Http404
class InvoiceShowDelete(View):
def get(self, request, pk):
invoice = get_object_invoice(pk)
serializer = InvoiceSerializer(invoice)
return HttpResponse(json.dumps(serializer.data), status=200)
def delete(self, request, pk):
invoice = get_object_invoice(pk)
invoice.delete()
return HttpResponse(status=204)
class InvoiceCreateList(View):
def get(self, request):
invoices = Invoice.objects.all()
serializer = InvoiceSerializer(invoices, many=True)
return HttpResponse(json.dumps(serializer.data))
def post(self, request):
dict_invoice = {}
dict_seller = {}
dict_receiver = {}
json_dict = None
if request.body:
json_dict = json.loads(request.body)
elif request.POST:
json_dict = request.POST
# access_key, uf_code_seller, cnpj_seller, number
access_key = json_dict['main_access_key'].replace(' ', '')
uf_code_seller = access_key[0:2]
cnpj_seller = access_key[6:20]
number = access_key[25:34]
dict_invoice['access_key'] = access_key
dict_invoice['number'] = number
dict_seller['uf_code'] = uf_code_seller
dict_seller['cnpj'] = cnpj_seller
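# Background note (standard Brazilian NF-e access-key layout, stated here as an
# assumption rather than taken from this codebase): the 44-digit key packs
# cUF [0:2], AAMM [2:6], seller CNPJ [6:20], model [20:22], series [22:25] and
# invoice number nNF [25:34], which is what the slices above rely on.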
# cpf_cnpj_receiver
cpf_cnpj_receiver = json_dict['sender_cnpj_cpf']
cpf_cnpj_receiver = re.search(
r'\d{11}|\d{14}|\d{3}\.\d{3}\.\d{3}\-\d{2}|\d{2}\.\d{3}\.\d{3}\/\d{4}\-\d{2}',
cpf_cnpj_receiver,
re.M | re.I
)
cpf_cnpj_receiver = str(cpf_cnpj_receiver.group())
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('-', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('.', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('/', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace(' ', '')
dict_receiver['cpf_cnpj'] = cpf_cnpj_receiver
# operation_nature
dict_invoice['operation_nature'] = json_dict['main_nature_operation']
# authorization_protocol
dict_invoice['authorization_protocol'] = json_dict['main_protocol_authorization_use']
# state_registration
dict_invoice['state_registration'] = json_dict['main_state_registration']
# emission_date
emission_date = json_dict['sender_emission_date']
emission_date = re.search(r'\d{2}\/\d{2}\/\d{4}', emission_date, re.M | re.I)
emission_date = str(emission_date.group())
emission_date = emission_date.split('/')
emission_date = emission_date[2] + '-' + emission_date[1] + '-' + emission_date[0]
dict_invoice['emission_date'] = emission_date
# entry_exit_datetime
entry_exit_datetime = json_dict['sender_out_input_date']
entry_exit_datetime = entry_exit_datetime.split('/')
time = json_dict['sender_output_time']
u = entry_exit_datetime[2] + '-' + entry_exit_datetime[1] + '-' + entry_exit_datetime[0] + 'T' + time
entry_exit_datetime = u
dict_invoice['entry_exit_datetime'] = entry_exit_datetime
# total_products_value
total_products_value = json_dict['tax_total_cost_products']
total_products_value = total_products_value.replace('.', '')
total_products_value = total_products_value.replace(',', '.')
dict_invoice['total_products_value'] = float(total_products_value)
# total_invoice_value
total_invoice_value = json_dict['tax_cost_total_note']
total_invoice_value = total_invoice_value.replace('.', '')
total_invoice_value = total_invoice_value.replace(',', '.')
dict_invoice['total_invoice_value'] = float(total_invoice_value)
# basis_calculation_icms
basis_calculation_icms = json_dict['tax_icms_basis']
basis_calculation_icms = basis_calculation_icms.replace('.', '')
basis_calculation_icms = basis_calculation_icms.replace(',', '.')
dict_invoice['basis_calculation_icms'] = float(basis_calculation_icms)
# freight_value
freight_value = json_dict['tax_cost_freight']
freight_value = freight_value.replace('.', '')
freight_value = freight_value.replace(',', '.')
dict_invoice['freight_value'] = float(freight_value)
# insurance_value
insurance_value = json_dict['tax_cost_insurance']
insurance_value = insurance_value.replace('.', '')
insurance_value = insurance_value.replace(',', '.')
dict_invoice['insurance_value'] = float(insurance_value)
# icms_value
icms_value = json_dict['tax_cost_icms']
icms_value = icms_value.replace('.', '')
icms_value = icms_value.replace(',', '.')
dict_invoice['icms_value'] = float(icms_value)
# discount_value
discount_value = json_dict['tax_discount']
discount_value = discount_value.replace('.', '')
discount_value = discount_value.replace(',', '.')
dict_invoice['discount_value'] = float(discount_value)
# basis_calculation_icms_st
basis_calculation_icms_st = json_dict['tax_icms_basis_st']
basis_calculation_icms_st = basis_calculation_icms_st.replace('.', '')
basis_calculation_icms_st = basis_calculation_icms_st.replace(',', '.')
dict_invoice['basis_calculation_icms_st'] = float(basis_calculation_icms_st)
# icms_value_st
icms_value_st = json_dict['tax_cost_icms_replacement']
icms_value_st = icms_value_st.replace('.', '')
icms_value_st = icms_value_st.replace(',', '.')
dict_invoice['icms_value_st'] = float(icms_value_st)
# other_expenditure
other_expenditure = json_dict['tax_other_expenditure']
other_expenditure = other_expenditure.replace('.', '')
other_expenditure = other_expenditure.replace(',', '.')
dict_invoice['other_expenditure'] = float(other_expenditure)
# ipi_value
ipi_value = json_dict['tax_cost_ipi']
ipi_value = ipi_value.replace('.', '')
ipi_value = ipi_value.replace(',', '.')
dict_invoice['ipi_value'] = float(ipi_value)
# receiver
dict_receiver['name'] = json_dict['sender_name_social']
dict_receiver['address'] = json_dict['sender_address']
dict_receiver['neighborhood'] = json_dict['sender_neighborhood_district']
dict_receiver['cep'] = json_dict['sender_cep'].replace('-', '')
dict_receiver['county'] = json_dict['sender_county']
dict_receiver['uf'] = json_dict['sender_uf']
dict_receiver['phone'] = json_dict['sender_phone_fax']
# ------------------------
if Receiver.objects.filter(cpf_cnpj=cpf_cnpj_receiver).count() == 1:
receiver = Receiver.objects.get(cpf_cnpj=cpf_cnpj_receiver)
dict_invoice['receiver'] = receiver.pk
else:
receiver_serializer = ReceiverSerializer(data=dict_receiver)
if receiver_serializer.is_valid():
receiver_serializer.save()
else:
return HttpResponse(
json.dumps([
receiver_serializer.errors,
]),
status=400
)
dict_invoice['receiver'] = receiver_serializer.data['id']
if Seller.objects.filter(cnpj=cnpj_seller).count() == 1:
seller = Seller.objects.get(cnpj=cnpj_seller)
dict_invoice['seller'] = seller.pk
else:
seller_serializer = SellerSerializer(data=dict_seller)
if seller_serializer.is_valid():
seller_serializer.save()
else:
return HttpResponse(
json.dumps([
seller_serializer.errors,
]),
status=400
)
dict_invoice['seller'] = seller_serializer.data['id']
invoice_serializer = InvoiceSerializer(data=dict_invoice)
if invoice_serializer.is_valid():
invoice_serializer.save()
else:
return HttpResponse(
json.dumps(
invoice_serializer.errors
),
status=400
)
return HttpResponse(
json.dumps([
invoice_serializer.data,
]),
status=200
)
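# The monetary fields parsed in post() above arrive in Brazilian decimal notation
# ("1.234,56"). A minimal sketch of that conversion as a reusable helper
# (hypothetical name, not referenced by the views above):
def _parse_brl_decimal(value):
    # Drop the thousands separators, then swap the decimal comma for a dot.
    return float(value.replace('.', '').replace(',', '.'))
# e.g. _parse_brl_decimal('1.234,56') == 1234.56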
def sellerShow(request, pk):
if request.method == 'GET':
seller = get_object_seller(pk)
serializer = SellerSerializer(seller)
return HttpResponse(json.dumps(serializer.data), status=200)
return HttpResponse(status=400)
def receiverShow(request, pk):
if request.method == 'GET':
receiver = get_object_receiver(pk)
serializer = ReceiverSerializer(receiver)
return HttpResponse(json.dumps(serializer.data), status=200)
return HttpResponse(status=400)
def sellerList(request):
if request.method == 'GET':
seller = Seller.objects.all()
serializer = SellerSerializer(seller, many=True)
return HttpResponse(json.dumps(serializer.data))
return HttpResponse(status=400)
def receiverList(request):
if request.method == 'GET':
receiver = Receiver.objects.all()
serializer = ReceiverSerializer(receiver, many=True)
return HttpResponse(json.dumps(serializer.data))
return HttpResponse(status=400)
def chart_total_value_per_time(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
total = []
for invoice in invoices:
date.append(invoice.emission_date)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'date': date, 'total': total})
df = df.sort_values(by='date')
sf = df.groupby('date')['total'].sum()
df = pd.DataFrame({'date': sf.index, 'total': sf.values})
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
df['total'] = pd.to_numeric(df['total'].apply(lambda x: round(x, 2)))
data = df.to_dict('list')
df = pd.DataFrame({'dateM': date, 'totalM': total})
df = df.sort_values(by='dateM')
df['dateM'] = pd.to_datetime(df['dateM']).apply(lambda x: x.strftime('%Y-%m'))
sf = df.groupby('dateM')['totalM'].sum()
df = pd.DataFrame({'dateM': sf.index, 'totalM': sf.values})
df['dateM'] = | pd.to_datetime(df['dateM']) | pandas.to_datetime |
##############################
## Import requirements
## NOTE: Must do sudo pip install XlsxWriter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import xlsxwriter
##############################
## Generator - Create list with the identifier
def identifier(num):
    return [i + 1 for i in range(num)]
## Generator - Create a list of random genders
def genderrecord(num):
    genders = ['Male', 'Female']
    genderrecord = []
    for _ in range(num):
        genderrecord.append(random.choice(genders))
    return genderrecord
## Corruptor - Request information on the number of corruptions to make
def gender_corrupting_perc(num):
    corruptper = float(input("What percentage of records would you like to corrupt? "))
    corrupt_amount = int(round((corruptper / 100) * num, 0))
    if corruptper >= 1 and corrupt_amount == 0:
        corrupt_amount = 1
    return corrupt_amount
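# Example (illustrative): with num=50 and an input of 10 (percent), corrupt_amount
# is round(0.10 * 50) = 5; with num=30 and an input of 1, the rounded value is 0,
# so the guard above bumps it to 1 record.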
## Corruptor - Replace values with "."
def missing_corrupt_func(copydf, newdf, corrupt_amount):
    remove = np.random.choice(range(len(copydf)), corrupt_amount, replace=False)
    for place in remove:
        newdf[place] = "."
    return newdf
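# Minimal usage sketch (hypothetical data; mirrors how the generated gender column
# is expected to be corrupted):
# genders = pd.Series(['Male', 'Female', 'Male', 'Female'])
# corrupted = missing_corrupt_func(genders, genders.copy(), corrupt_amount=1)
# One randomly chosen entry of `corrupted` is now "." (a simulated missing value).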
########################################
## Asking users how many records to generate and creating index
number = int(input("How many records would you like to generate? "))
identifier = identifier(number)
## Creating random genders
genderrecord = genderrecord(number)
## Storing genders and index into a dataframe
record = pd.DataFrame( {"Identifier" : identifier,
"Gender" : genderrecord })
## Printing "Clean" file to excel
writer = | pd.ExcelWriter('CleanData', engine='xlsxwriter') | pandas.ExcelWriter |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
import numpy as np
import pandas as pd
import configparser
from Conf.loadconf import *
import DB.db_connects as dbconn
df_test = pd.DataFrame()
df_train = | pd.DataFrame() | pandas.DataFrame |
import pyaniasetools as aat
import pyanitools as ant
import hdnntools as hdt
import pandas as pd
import sys
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.backends.backend_pdf import PdfPages
#import seaborn as sns
pd.options.display.float_format = '{:.2f}'.format
# ----------------------------------
# Plot force histogram
# ----------------------------------
def plot_corr_dist_axes(ax, Xp, Xa, cmap, labelx, labely, plabel, vmin=0, vmax=0, inset=True):
Fmx = Xa.max()
Fmn = Xa.min()
# Plot ground truth line
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='red', linewidth=3)
# Set labels
ax.set_xlabel(labelx, fontsize=26)
ax.set_ylabel(labely, fontsize=26)
# Plot 2d Histogram
if vmin == 0 and vmax ==0:
bins = ax.hist2d(Xp, Xa, bins=200, norm=LogNorm(), range=[[Fmn, Fmx], [Fmn, Fmx]], cmap=cmap)
else:
bins = ax.hist2d(Xp, Xa, bins=200, norm=LogNorm(), range=[[Fmn, Fmx], [Fmn, Fmx]], cmap=cmap, vmin=vmin, vmax=vmax)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
# Annotate with label
ax.text(0.25*((Fmx-Fmn))+Fmn, 0.06*((Fmx-Fmn))+Fmn, plabel, fontsize=26)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
ax.text(0.6*((Fmx-Fmn))+Fmn, 0.2*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=30,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if inset:
axins = zoomed_inset_axes(ax, 2., loc=2) # zoom = 6
sz = 0.1*(Fmx-Fmn)
axins.hist2d(Xp, Xa, bins=50, range=[[Xa.mean() - sz, Xa.mean() + sz], [Xp.mean() - sz, Xp.mean() + sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xp.mean() - sz, Xp.mean() + sz], [Xp.mean() - sz, Xp.mean() + sz], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Xa.mean() - sz, Xa.mean() + sz, Xp.mean() - sz, Xp.mean() + sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
        mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
plt.xticks(visible=True)
plt.yticks(visible=True)
return bins
def add_inset_histogram(Xa, Xp, pos, ylim, xlim):
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes(pos)
    axh.hist(Ferr, bins=75, range=[men - 4 * std, men + 4 * std], density=False)
axh.set_ylim(ylim)
axh.set_xlim(xlim)
#axh.set_title('Difference distribution')
# ----------------------------------
# Plot force histogram
# ----------------------------------
def plot_corr_dist(Xa, Xp, inset=True, xlabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', ylabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', figsize=[13,10], cmap=mpl.cm.viridis):
Fmx = Xa.max()
Fmn = Xa.min()
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
fig, ax = plt.subplots(figsize=figsize)
# Plot ground truth line
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='r', linewidth=3)
# Set labels
ax.set_xlabel(xlabel, fontsize=22)
ax.set_ylabel(ylabel, fontsize=22)
#cmap = mpl.cm.viridis
#cmap = mpl.cm.brg
# Plot 2d Histogram
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), range= [[Xa.min(), Xa.max()], [Xp.min(), Xp.max()]], cmap=cmap)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=16)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
ax.text(0.75*((Fmx-Fmn))+Fmn, 0.43*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=20,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if inset:
axins = zoomed_inset_axes(ax, 2.2, loc=2) # zoom = 6
sz = 6
axins.hist2d(Xa, Xp,bins=50, range=[[Fmn/sz, Fmx/sz], [Fmn/sz, Fmx/sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xa.min(), Xa.max()], [Xa.min(), Xa.max()], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Fmn/sz, Fmx/sz, Fmn/sz, Fmx/sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
plt.xticks(visible=True)
plt.yticks(visible=True)
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes([.49, .16, .235, .235])
        axh.hist(Ferr, bins=75, range=[men-4*std, men+4*std], density=True)
axh.set_title('Difference distribution')
#plt.draw()
plt.show()
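# Minimal usage sketch for plot_corr_dist (synthetic values standing in for the
# DFT vs. ANI force arrays normally passed in):
# Xa = np.random.normal(0.0, 10.0, 5000)              # reference values
# Xp = Xa + np.random.normal(0.0, 1.0, 5000)          # noisy "predictions"
# plot_corr_dist(Xa, Xp, inset=False, xlabel='reference', ylabel='predicted')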
class generate_ensemble_data(aat.anicrossvalidationconformer):
'''Constructor'''
def __init__(self, networks, tsfiles, gpu=0):
super().__init__(networks['cns'], networks['sae'], networks['nnf'], networks['nts'], gpu )
self.tsfiles = tsfiles
self.Nn = networks['nts']
'''Stat generator'''
def generate_stats(self, maxe=sys.float_info.max, forces=True, grad=False):
self.tdata = dict()
for key in self.tsfiles.keys():
print(' -Working on',key,'...')
cdata = dict({'Eani': [],
'Edft': [],
'Erel': [],
'Fani': [],
'Fdft': [],
'dEani': [],
'dEdft': [],
'Na': [],
'Na2': [],})
for file in self.tsfiles[key]:
print(key,file)
adl = ant.anidataloader(file)
for i, data in enumerate(adl):
#if i > 5:
# break
if data['coordinates'].shape[0] != 0:
Eani, Fani, sig = self.compute_energyandforce_conformations(np.array(data['coordinates'],dtype=np.float64), data['species'], ensemble=False)
midx = np.where( data['energies'] - data['energies'].min() < maxe/hdt.hatokcal )[0]
Eani = Eani[:,midx]
Edft = data['energies'][midx]
Erel = (data['energies'] - data['energies'].min())[midx]
Fani = Fani[:,midx,:,:]
if forces:
if grad:
Fdft = -data['forces'][midx]
else:
Fdft = data['forces'][midx]
else:
Fdft = 0.0*data['coordinates'][midx]
#Eestd = np.std(Eani, axis=0)/np.sqrt(len(data['species']))
Eeani = np.mean(Eani, axis=0).reshape(1,-1)
Feani = np.mean(Fani, axis=0).flatten().reshape(1,-1)
Fani = Fani.reshape(Fani.shape[0],-1)
Eani = np.vstack([Eani, Eeani])
Fani = np.vstack([Fani, Feani])
Edft = hdt.hatokcal * Edft
Fdft = hdt.hatokcal * Fdft.flatten()
cdata['Na'].append(np.full(Edft.size, len(data['species']), dtype=np.int32))
cdata['Eani'].append(Eani)
cdata['Edft'].append(Edft)
cdata['Erel'].append(Erel)
cdata['Fani'].append(Fani)
cdata['Fdft'].append(Fdft)
#cdata['Frmse'].append(np.sqrt(np.mean((Fani-Fdft).reshape(Fdft.shape[0], -1)**2, axis=1)))
#cdata['Frmae'].append(np.sqrt(np.mean(np.abs((Fani - Fdft).reshape(Fdft.shape[0], -1)), axis=1)))
cdata['dEani'].append(hdt.calculateKdmat(self.Nn+1, Eani))
cdata['dEdft'].append(hdt.calculatedmat(Edft))
cdata['Na2'].append(np.full(cdata['dEdft'][-1].size, len(data['species']), dtype=np.int32))
#cdata['Erani'].append(Eani-Eani.min())
#cdata['Erdft'].append(Edft-Edft.min())
for k in ['Na', 'Na2', 'Edft', 'Fdft', 'dEdft', 'Erel']:
cdata[k] = np.concatenate(cdata[k])
for k in ['Eani', 'Fani', 'dEani']:
cdata[k] = np.hstack(cdata[k])
self.tdata.update({key: cdata})
''' Generate total errors '''
def store_data(self, filename):
if os.path.exists(filename):
os.remove(filename)
dpack = ant.datapacker(filename)
for k in self.tdata.keys():
dpack.store_data(k,**(self.tdata[k]))
dpack.cleanup()
names = [r'E$_\mathrm{MAE}$$\mu$',
         r'E$_\mathrm{MAE}$$\sigma$',
         r'E$_\mathrm{RMS}$$\mu$',
         r'E$_\mathrm{RMS}$$\sigma$',
         r'$\Delta$E$_\mathrm{MAE}$$\mu$',
         r'$\Delta$E$_\mathrm{MAE}$$\sigma$',
         r'$\Delta$E$_\mathrm{RMS}$$\mu$',
         r'$\Delta$E$_\mathrm{RMS}$$\sigma$',
         r'F$_\mathrm{MAE}$$\mu$',
         r'F$_\mathrm{MAE}$$\sigma$',
         r'F$_\mathrm{RMS}$$\mu$',
         r'F$_\mathrm{RMS}$$\sigma$',
         ]
class evaluate_ensemble_data(aat.anicrossvalidationconformer):
'''Constructor'''
def __init__(self, datafile):
self.fdata = dict()
for df in datafile:
adl = ant.anidataloader(df)
tdata = dict()
for data in adl:
tdata.update({data['path'].split('/')[-1] : data})
adl.cleanup()
self.fdata[df.split('tsdata_')[-1].split('.h5')[0]] = tdata
''' Generate total errors '''
def generate_fullset_errors(self, ntkey, tslist):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
#tskeys = self.fdata[ntkey].keys()
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
#print(self.fdata[ntkey][tskey]['Fdft'].shape)
return {names[0]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[1]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn,:] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]), axis=1)),
names[2]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[3]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]), axis=1)),
names[4]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[5]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]), axis=1)),
names[6]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[7]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]), axis=1)),
names[8]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[9]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]), axis=1)),
names[10]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[11]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]), axis=1)),
#'FMAEm': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
#'FMAEs': np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][0:Nn,:], self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'FRMSm': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
#'FRMSs': np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][0:Nn, :],self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'dEMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'dERMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'ERMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['Erdft'][idx]),
#'ERRMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['rdft'][idx]),
}
''' Generate total errors '''
def get_range_stats(self, tslist, dkey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
ntkey = list(self.fdata.keys())[0]
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]][dkey].shape[0]-1
return np.concatenate([self.fdata[ntkey][tskey][dkey] for tskey in tskeys])
''' Generate total errors '''
def generate_fullset_peratom_errors(self, ntkey, tslist):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
#print(self.fdata[ntkey]['GDB07to09']['Eani'][Nn,:])
#print(self.fdata[ntkey]['GDB07to09']['Na'])
#print(self.fdata[ntkey]['GDB07to09']['Eani'][Nn,:]/self.fdata[ntkey]['GDB07to09']['Na'])
return {names[0]: 1000*hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:]/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft']/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])),
names[2]: 1000*hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn, :]/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft']/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])),
names[4]: 1000*hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys])),
names[6]: 1000*hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys])),
}
''' Generate total errors '''
def generate_fullset_mean_errors(self, ntkey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
tskeys = self.fdata[ntkey].keys()
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
return {names[2]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[2]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]),axis=1)),
names[6]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[6]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]),axis=1)),
names[10]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[10]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]),axis=1)),
}
''' Generate total errors '''
def generate_total_errors(self, ntkey, tskey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
Nn = self.fdata[ntkey][tskey]['Eani'].shape[0]-1
return {names[0]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Eani'][Nn,:], self.fdata[ntkey][tskey]['Edft']),
names[1]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Eani'][0:Nn,:], self.fdata[ntkey][tskey]['Edft'], axis=1)),
names[2]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Eani'][Nn,:], self.fdata[ntkey][tskey]['Edft']),
names[3]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Eani'][0:Nn,:], self.fdata[ntkey][tskey]['Edft'], axis=1)),
names[4]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'][Nn,:], self.fdata[ntkey][tskey]['dEdft']),
names[5]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'][0:Nn,:], self.fdata[ntkey][tskey]['dEdft'], axis=1)),
names[6]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'][Nn,:], self.fdata[ntkey][tskey]['dEdft']),
names[7]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'][0:Nn,:], self.fdata[ntkey][tskey]['dEdft'], axis=1)),
names[8]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
names[9]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][0:Nn,:], self.fdata[ntkey][tskey]['Fdft'], axis=1)),
names[10]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
names[11]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][0:Nn, :],self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'dEMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'dERMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'ERMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['Erdft'][idx]),
#'ERRMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['rdft'][idx]),
}
def determine_min_error_by_sigma(self, ntkey, minerror, percent, tskeys = ['GDB07to09'], figsize=(15.0, 12.0), labelx='', labely='', xyrange=(0.0,10.0,0.0,10.0), storepath='', cmap=mpl.cm.viridis):
#tskeys = self.fdata[ntkey].keys()
mpl.rcParams['xtick.labelsize'] = 18
mpl.rcParams['ytick.labelsize'] = 18
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
Eani = np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys])
Eanimu = np.hstack([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys])
#Eani = np.hstack([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys])
Edft = np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])
#print(Eani.shape, Edft.shape, )
#print(np.max(Eerr.shape, axis=0))
Sani = np.concatenate([np.std(self.fdata[ntkey][tskey]['Eani'][0:Nn, :], axis=0) for tskey in tskeys])
Na = np.concatenate([self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])
#print(Sani.shape, Na.shape)
Sani = Sani / np.sqrt(Na)
Eerr = np.max(np.abs(Eani - Edft),axis=0) / np.sqrt(Na)
#Eerr = np.abs(np.mean(Eani,axis=0) - Edft) / np.sqrt(Na)
#Eerr = np.abs(Eani - Edft) / np.sqrt(Na)
#print(Eerr)
#print(Sani)
Nmax = np.where(Eerr > minerror)[0].size
perc = 0
dS = Sani.max()
step = 0
while perc < percent:
S = dS - step*0.001
Sidx = np.where(Sani > S)
step += 1
perc = 100.0*np.where(Eerr[Sidx] > minerror)[0].size/(Nmax+1.0E-7)
#print(step,perc,S,Sidx)
#print('Step:',step, 'S:',S,' -Perc over:',perc,'Total',100.0*Sidx[0].size/Edft.size)
#dE = np.max(Eerr, axis=0) / np.sqrt(Na)
#print(Eerr.shape,Eerr)
So = np.where(Sani > S)
Su = np.where(Sani <= S)
print('RMSE Over: ', hdt.calculaterootmeansqrerror(Eanimu[So],Edft[So]))
print('RMSE Under: ', hdt.calculaterootmeansqrerror(Eanimu[Su],Edft[Su]))
fig, ax = plt.subplots(figsize=figsize)
poa = np.where(Eerr[So] > minerror)[0].size / So[0].size
pob = np.where(Eerr > minerror)[0].size / Eerr.size
ax.text(0.57*(xyrange[1]), 0.04*(xyrange[3]), 'Total Captured: ' + str(int(100.0 * Sidx[0].size / Edft.size)) + '%' +
'\n' + r'($\mathrm{\mathcal{E}>}$'+ "{:.1f}".format(minerror) + r'$\mathrm{) \forall \rho}$: ' + str(int(100*pob)) + '%' +
'\n' + r'($\mathrm{\mathcal{E}>}$'+ "{:.1f}".format(minerror) + r'$\mathrm{) \forall \rho >}$' + "{:.2f}".format(S) + ': ' + str(int(100*poa)) + '%' +
'\n' + r'$\mathrm{E}$ RMSE ($\mathrm{\rho>}$'+ "{:.2f}".format(S) + r'$\mathrm{)}$: ' + "{:.1f}".format(hdt.calculaterootmeansqrerror(Eanimu[So],Edft[So])) +
'\n' + r'$\mathrm{E}$ RMSE ($\mathrm{\rho\leq}$' + "{:.2f}".format(S) + r'$\mathrm{)}$: ' + "{:.1f}".format(hdt.calculaterootmeansqrerror(Eanimu[Su], Edft[Su])),
bbox={'facecolor':'grey', 'alpha':0.5, 'pad':10}, fontsize=18)
plt.axvline(x=S,linestyle='--',color='r',linewidth=5, label=r"$\mathrm{\rho=}$"+"{:.2f}".format(S) + ' is the value that captures\n'+ str(int(percent)) + '% of errors over ' + r"$\mathrm{\mathcal{E}=}$" + "{:.1f}".format(minerror))
#)
# Set labels
ax.set_xlabel(labelx, fontsize=24)
ax.set_ylabel(labely, fontsize=24)
# Plot 2d Histogram
bins = ax.hist2d(Sani, Eerr, bins=400, norm=LogNorm(), range=[[xyrange[0], xyrange[1]], [xyrange[2], xyrange[3]]], cmap=cmap)
# Build color bar
# cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=20)
cb1.ax.tick_params(labelsize=18)
plt.legend(loc='upper center',fontsize=18)
if storepath:
pp = PdfPages(storepath)
pp.savefig(fig)
pp.close()
else:
plt.show()
def get_net_keys(self):
return self.fdata.keys()
def get_totalerror_table(self, tslist = []):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_fullset_errors(k, tslist))
pd.set_option('expand_frame_repr', False)
edat = pd.DataFrame(errors).transpose()
return edat
def get_totalerrorperatom_table(self, tslist = []):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_fullset_peratom_errors(k, tslist))
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
import pandas as pd
from core.models import *
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
import datetime
def make_all_client():
qs = list(ClientModel.objects.values_list('name__name', 'task__task', 'subtask__subtask', 'time_spent'))
df = pd.DataFrame(qs, columns=['name', 'task', 'subtask', 'time_spent'])
df.to_csv('stuff/all.csv', sep=';', index=None)
class AllClientsPageView(TemplateView):
template_name = 'all_clients.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(AllClientsPageView, self).get_context_data(**kwargs)
make_all_client()
all_clients = pd.read_csv('stuff/all.csv', sep=';')
        all_clients = all_clients.groupby(['name', 'task', 'subtask']).sum().fillna(0).reset_index()
all_clients['time_spent'] = pd.to_timedelta(all_clients.time_spent, unit='m')
context.update({'df': all_clients.values})
return context
def make_all_employee():
qs = list(ClientModel.objects.values_list('dec_name',
'time_spent', 'date_added'))
df = pd.DataFrame(qs, columns=['dec_name',
'time_spent', 'date_added'])
df.date_added = df.date_added.values.astype('M8[D]')
df.to_csv('stuff/employees.csv', sep=';', index=None)
class EmployeeTabPageView(TemplateView):
template_name = 'employee_tab.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EmployeeTabPageView, self).get_context_data(**kwargs)
make_all_employee()
today = datetime.date.today()
employees = pd.read_csv('stuff/employees.csv', sep=';')
        employees = employees.groupby(['dec_name', 'date_added']).sum().fillna(0).reset_index()
employees['time_spent'] = pd.to_datetime(employees.time_spent, unit='m').dt.strftime('%H:%M')
date_split = employees['date_added'].str.split('-')
employees['date_added'] = date_split.str[-1] + '/' + date_split.str[1] + '/' + date_split.str[0]
context.update({'df': employees.values})
return context
class SumOfClientView(TemplateView):
template_name = 'clientsum.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SumOfClientView, self).get_context_data(**kwargs)
make_all_client()
all_clients = | pd.read_csv('stuff/all.csv', sep=';') | pandas.read_csv |
#!/usr/bin/env python3
# @Author : <NAME>
# @FileName : meth_stats_tool.py
# @Software : NANOME project
# @Organization : JAX Li Lab
# @Website : https://github.com/TheJacksonLaboratory/nanome
"""
Tool for pre-processing results
"""
import argparse
import glob
import gzip
import sys
from collections import defaultdict
from multiprocessing import Pool
import h5py
import numpy as np
import pandas as pd
from Bio import SeqIO
from ont_fast5_api.fast5_interface import get_fast5_file
from tqdm import tqdm
from nanocompare.eval_common import load_tombo_df, load_deepmod_df, get_dna_base_from_reference, \
load_sam_as_strand_info_df, load_nanopolish_df
from nanocompare.global_config import *
from nanocompare.global_settings import humanChrSet
def add_strand_info_for_nanopolish(
nanopolish_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.methylation_calls.tsv',
sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sam'):
"""
No need for new nanopolish output
    Combine the nanopolish output TSV results with strand-info from the SAM file; the strand-info is appended as the last column.
    This is needed because the original nanopolish output contains no strand-info.
Return results columns are:
[(0, 'chromosome'), (1, 'start'), (2, 'end'), (3, 'read_name'), (4, 'log_lik_ratio'), (5, 'log_lik_methylated'), (6, 'log_lik_unmethylated'), (7, 'num_calling_strands'), (8, 'num_cpgs'), (9, 'sequence'), (10, 'strand-info')]
:param nanopolish_fn: nanopolish file name
:param sam_fn: SAM file name for strand-info
:return:
"""
if args.i is not None:
nanopolish_fn = args.i
if args.ibam is not None:
sam_fn = args.ibam
df2 = load_sam_as_strand_info_df(infn=sam_fn)
df1 = load_nanopolish_df(infn=nanopolish_fn)
df = df1.merge(df2, left_on='read_name', right_on='read-name', how='left')
df = df.drop('read-name', axis=1)
logger.info(df)
logger.info(list(enumerate(df.columns)))
if len(df1) != len(df):
raise Exception(
"We found the read-name of Nanopolish results is not mapped all to SAM/BAM file, please check if the BAM file is used for Nanopolish")
# df = df.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
outfn = os.path.join(pic_base_dir,
f'{os.path.splitext(os.path.basename(nanopolish_fn))[0]}-nanopolish-strand-info.tsv')
df.to_csv(outfn, sep='\t', index=False)
logger.info(f'save to {outfn}')
return df
def sanity_check_get_dna_seq(chrstr):
"""
Check 0-based start, input as 'chr1:123'
:param chrstr:
:return:
"""
chr, start = chrstr.strip().split(':')
start = int(start)
show_arrow = ''.join(['~'] * 5 + ['↑'] + ['~'] * 5)
ret = get_dna_base_from_reference(chr, start, ref_fasta=ref_fasta)
logger.info(f'chr={chr}, start={start}\nSEQ={ret}\nPOS={show_arrow}')
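# Example (illustrative): sanity_check_get_dna_seq('chr1:10468') logs an ~11-base
# window around the 0-based position 10468 of chr1, with the arrow row marking the
# queried base (assuming the default window size of get_dna_base_from_reference).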
def filter_noncg_sites_ref_seq(df, tagname, ntask=1, ttask=1, num_seq=5, chr_col=0, start_col=1, strand_col=5,
toolname='tombo'):
"""
Filter out rows that are non-CG patterns in Tombo results, reference sequence is based on BAM files
from SAM to BAM (with index) script is as follows:
samtools view -S -b K562.sam > K562.bam
samtools sort -o K562.sorted.bam K562.bam
samtools index K562.sorted.bam
:param tombo_fn:
:param sam_fn:
:return:
"""
chrs = df.iloc[:, chr_col].unique()
chrs = np.sort(chrs)
logger.info(chrs)
logger.info(len(chrs))
all_list = list(range(len(df)))
cpg_pattern_index = subset_of_list(all_list, ntask, ttask)
# sel_chrs = subset_of_list(chrs, ntask, ttask)
# logger.info(sel_chrs)
# df = df[df[0].isin(sel_chrs)]
df = df.iloc[cpg_pattern_index, :]
logger.info(df)
rep_chr = df.iloc[0, chr_col]
seq_col = []
cpg_pattern_index = []
print_first = True
for index, row in tqdm(df.iterrows()):
if print_first:
logger.info(f"index={index}, row={row}")
print_first = False
chr = row[chr_col]
start = int(row[start_col])
strand_info = row[strand_col]
# ret = get_dna_sequence_from_samfile(chr, start, start + num_seq, samfile) # may return None, if no sequence at all reads
ret = get_dna_base_from_reference(chr, start, num_seq=num_seq, ref_fasta=ref_fasta)
seq_col.append(ret)
if toolname == 'tombo':
if ret[5:7] == 'CG':
cpg_pattern_index.append(index)
elif toolname == 'deepmod':
if strand_info == '+':
if ret[5:7] == 'CG':
cpg_pattern_index.append(index)
elif strand_info == '-':
if ret[4:6] == 'CG':
cpg_pattern_index.append(index)
# TODO: using ret if it is CG pattern, or will remove later
# logger.info(f'chr={chr}, start={start}, strand={strand_info}, ret={ret}')
# if index > 10000:
# break
df['sequence'] = seq_col
logger.debug(f'before filter:{len(df)}, after non-CG filter:{len(cpg_pattern_index)}')
df = df.loc[cpg_pattern_index, :]
# tagname is like 'K562.tombo.perReadsStats.combine'
# then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv'
outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv')
df.to_csv(outfn, sep='\t', header=False, index=False)
logger.info(f"save to {outfn}")
def filter_noncg_sites_ref_seq_mpi(df, tagname, ntask=1, ttask=1, num_dna_seq=5, chr_col=0, start_col=1, strand_col=5,
toolname='tombo', print_first=False):
"""
MPI version
invoke like: res = p.apply_async(testFunc, args=(2, 4), kwds={'calcY': False})
or pool.apply_async(test, (t,), dict(arg2=5))
    Filter out rows with non-CG patterns in the Tombo results; the reference sequence is looked up from the reference genome (ref_fasta).
:param tombo_fn:
:param sam_fn:
:return:
"""
rep_chr = df.iloc[0, chr_col]
seq_col = []
only_cpg_pattern_index = []
for index, row in df.iterrows():
if print_first:
logger.info(f"index={index}, row={row}")
print_first = False
chr = row[chr_col]
start = int(row[start_col])
strand_info = row[strand_col]
ret = get_dna_base_from_reference(chr, start, num_seq=num_dna_seq, ref_fasta=ref_fasta)
seq_col.append(ret)
if toolname == 'tombo':
if ret[5:7] == 'CG':
only_cpg_pattern_index.append(index)
elif toolname in ['deepmod', 'deepmod-read-level']:
if strand_info == '+':
if ret[5:7] == 'CG':
only_cpg_pattern_index.append(index)
elif strand_info == '-':
if ret[4:6] == 'CG':
only_cpg_pattern_index.append(index)
df['sequence'] = seq_col
# logger.debug(f'Subprocess [{ttask}:{ntask}] finished, before filter:{len(df)}, after non-CG filter:{len(only_cpg_pattern_index)}')
df = df.loc[only_cpg_pattern_index, :]
# tagname is like 'K562.tombo.perReadsStats.combine'
# then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv'
# outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv')
# df.to_csv(outfn, sep='\t', header=False, index=False)
# logger.info(f"save to {outfn}")
logger.info(f"Finished of subprocess {ttask}:{ntask}")
return df
def filter_noncg_sites_for_tombo(
tombo_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.tombo_perReadsStats.bed',
sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5):
if args.i is not None:
tombo_fn = args.i
df = load_tombo_df(infn=tombo_fn)
basefn = os.path.basename(tombo_fn)
basename = os.path.splitext(basefn)[0]
filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq)
def convert_bismark_add_strand_and_seq(indf, outfn, report_num=None):
"""
    Check the start position: if it points to the C of a CG the site is on the positive strand; if it points to the G, it is on the reverse strand.
    Note: the input file uses 1-based starts; the output keeps a 1-based format that is compatible with our Bismark import functions.
    :param indf:
    :param outfn:
    :param report_num:
    :return:
"""
logger.debug(f'Start add strand and seq to bismark cov file, total len={len(indf)}')
outf = gzip.open(outfn, 'wt')
for index, row in tqdm(indf.iterrows()):
# if report_num and index % report_num == 0:
# logger.debug(f'processed index={index}')
chr = row['chr']
start = int(row['start']) # Keep raw 1-based format of bismark results
ret = get_dna_base_from_reference(chr, start - 1, ref_fasta=ref_fasta)
if ret[5] == 'C': # strand is +
strand = '+'
elif ret[5] == 'G':
strand = '-'
else:
            raise Exception(f'Cannot determine the strand for this bg-truth row (non-CG reference base), row={row}')
outstr = '\t'.join([chr, str(start), strand, str(row['mcount']), str(row['ccount']), ret[4:7]])
outf.write(f'{outstr}\n')
outf.close()
logger.info(f'save to {outfn}')
logger.debug(f'Finish add strand info task')
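# Example (illustrative): a 1-based bismark row at chr1:12345 is looked up at the
# 0-based position 12344; if the reference base there is 'C' the CpG is written with
# strand '+', if it is 'G' with strand '-', together with mcount, ccount and the
# trinucleotide context ret[4:7].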
def convert_bismark_cov_to_gw_format(df):
"""
Save adding strand info and dna seq format, which is in same format of Bismark Genome-wide output files
:param df:
:return:
"""
basefn = os.path.basename(args.i)
basename = os.path.splitext(basefn)[0]
outfn = os.path.join(args.o, f'{basename}.convert.add.strand.tsv.gz')
convert_bismark_add_strand_and_seq(df, outfn)
def filter_noncg_sites_mpi(df, ntask=300, toolname='tombo'):
"""
    MPI version of filtering out non-CG patterns
:return:
"""
basefn = os.path.basename(args.i)
basename = os.path.splitext(basefn)[0]
all_list = list(range(len(df)))
# Store each sub-process return results
df_list = []
with Pool(processes=args.processors) as pool:
for epoch in range(ntask):
cpg_pattern_index = subset_of_list(all_list, ntask, epoch + 1)
seldf = df.iloc[cpg_pattern_index, :]
if toolname == 'tombo':
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1)))
elif toolname == 'deepmod':
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1),
dict(chr_col=0, start_col=1, strand_col=5, toolname='deepmod')))
elif toolname == 'deepmod-read-level':
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1),
dict(chr_col=0, start_col=1, strand_col=5,
toolname='deepmod-read-level')))
else:
raise Exception(f"{toolname} is no valid.")
pool.close()
pool.join()
# Combine df
logger.debug("Start to combine all results")
df_list = [df1.get() for df1 in df_list]
retdf = pd.concat(df_list)
logger.debug(retdf)
## Note: original input=K562.tombo.perReadsStats.combine.tsv
## output=K562.tombo.perReadsStatsOnlyCpG.combine.tsv
if toolname == 'tombo':
basefn = basefn.replace("perReadsStats", "perReadsStatsOnlyCG").replace("combined", "combine")
elif toolname == 'deepmod':
## Example: HL60.deepmod.C.combined.tsv
basefn = basefn.replace(".C.", ".C_OnlyCG.").replace("combined", "combine")
else:
raise Exception(f"{toolname} is no valid.")
outfn = os.path.join(args.o, f'{basefn}')
retdf.to_csv(outfn, sep='\t', index=False, header=False)
logger.debug(f"Save to {outfn}")
def filter_noncg_sites_for_deepmod(
deepmod_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.deepmod_combined.bed',
sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5):
if args.i is not None:
deepmod_fn = args.i
df = load_deepmod_df(infn=deepmod_fn)
basefn = os.path.basename(deepmod_fn)
basename = os.path.splitext(basefn)[0]
filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq, chr_col=0,
start_col=1, strand_col=5, toolname='deepmod')
def subset_of_list(alist, n, t):
"""
Subset of a list for multi-processing
n=1 to 100
t=1 to N
return subset list of alist
:param alist:
:param n:
:param t:
:return:
"""
if t < 1 or t > n:
raise Exception(f't={t} is not accept, must be 1-N (include)')
if n > len(alist): # if n is bigger than all list, return only 1 for t<=len
if t <= len(alist):
return [alist[t - 1]]
else:
return None
m = int(len(alist) / n) # each task of a section of list
start_index = int((t - 1) * m)
if t == n:
sublist = alist[start_index:]
else:
sublist = alist[start_index:start_index + m]
# logger.debug(f'n={n}, t={t}, section={m}, index={start_index}:{start_index + m}')
    return sublist
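# Example (illustrative): splitting 10 items into n=3 sections gives
#   subset_of_list(list(range(10)), 3, 1) -> [0, 1, 2]
#   subset_of_list(list(range(10)), 3, 2) -> [3, 4, 5]
#   subset_of_list(list(range(10)), 3, 3) -> [6, 7, 8, 9]   (the last task absorbs the remainder)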
def get_f5_readid_map(flist):
f5_readid_map = defaultdict(str)
for fn in flist:
basename = os.path.basename(fn)
with get_fast5_file(fn, mode="r") as f5:
# if len(f5.get_reads()) >= 2:
# raise Exception(f'We can only deal with one read in fast5, but fn={fn}, contains {len(f5.get_reads())} multiple reads')
for read in f5.get_reads():
f5_readid_map[basename] = str(read.read_id)
return f5_readid_map
def build_map_fast5_to_readid_mp(
basedir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall', ntask=300):
patfn = os.path.join(basedir, '**', '*.fast5')
fast5_flist = glob.glob(patfn, recursive=True)
logger.info(f'Total fast5 files: {len(fast5_flist)}')
ret_list = []
with Pool(processes=args.processors) as pool:
for epoch in range(ntask):
subflist = subset_of_list(fast5_flist, ntask, epoch + 1)
ret_list.append(pool.apply_async(get_f5_readid_map, (subflist,)))
pool.close()
pool.join()
logger.debug('Finish fast5 to read-id mapping')
f5_readid_map = defaultdict(str)
for ret in ret_list:
f5_readid_map.update(ret.get())
# for fn in fast5_flist[:]:
# # logger.debug(fn)
# basename = os.path.basename(fn)
#
# with get_fast5_file(fn, mode="r") as f5:
# for read in f5.get_reads():
# # logger.debug(read.read_id)
# f5_readid_map[basename] = str(read.read_id)
return f5_readid_map
def process_pred_detail_f5file(fn, f5_readid_map):
"""
    For each DeepMod prediction result file, build a DataFrame of read-level results
:param fn:
:param f5_readid_map:
:return:
"""
f5_pred_key = '/pred/pred_0/predetail'
dflist = []
with h5py.File(fn, 'r') as mr:
# m_pred = mr[f5_pred_key].value
# logger.debug(m_pred)
for name in mr['/pred']:
# logger.debug(name)
pred_num_key = f'/pred/{name}'
f5file = os.path.basename(mr[pred_num_key].attrs['f5file'])
mapped_chr = mr[pred_num_key].attrs['mapped_chr']
mapped_strand = mr[pred_num_key].attrs['mapped_strand']
# logger.debug(f'{pred_num_key}: chr={mapped_chr}, strand={mapped_strand}, f5file={f5file}')
pred_detail_key = f'{pred_num_key}/predetail'
# m_pred = mr[pred_detail_key].value
m_pred = mr[pred_detail_key][()]
            m_pred = np.array(m_pred, dtype=[('refbase', 'U1'), ('readbase', 'U1'), ('refbasei', np.uint64),
                                             ('readbasei', np.uint64), ('mod_pred', int)])
dataset = []
for mi in range(len(m_pred)):
if m_pred['refbase'][mi] not in ['C']:
continue
if m_pred['refbase'][mi] in ['-', 'N', 'n']:
continue
# if m_pred['readbase'][mi] == '-':
# continue
# Filter non-CG patterns results
ret = get_dna_base_from_reference(mapped_chr, int(m_pred['refbasei'][mi]), ref_fasta=ref_fasta)
if mapped_strand == '+':
if ret[5:7] != 'CG':
continue
elif mapped_strand == '-':
if ret[4:6] != 'CG':
continue
if -0.1 < m_pred['mod_pred'][mi] - 1 < 0.1:
meth_indicator = 1
else:
meth_indicator = 0
# sp_options['4NA'][m_pred['refbase'][mi]][(cur_chr, cur_strand, int(m_pred['refbasei'][mi]) )][0] += 1
ret = {'start': int(m_pred['refbasei'][mi]), 'pred': meth_indicator, 'base': m_pred['refbase'][mi],
'sequence': ret}
dataset.append(ret)
df = pd.DataFrame(dataset)
if len(df) < 1:
continue
df['chr'] = str(mapped_chr)
df['end'] = df['start'] + 1
df['strand'] = str(mapped_strand)
df['read-id'] = f5_readid_map[f5file]
df = df[['chr', 'start', 'end', 'read-id', 'base', 'strand', 'sequence', 'pred']]
# logger.info(df)
dflist.append(df)
sumdf = pd.concat(dflist)
# logger.debug(f'Process pred detail file {fn} finished, total reads={len(sumdf)}.')
return sumdf
def extract_deepmod_read_level_results_mp(
basecallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall',
methcallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall', ntask=50):
f5_readid_map = build_map_fast5_to_readid_mp(basedir=basecallDir, ntask=ntask)
# logger.debug(f5_readid_map)
# dirname = '/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall/**/rnn.pred.detail.fast5.*'
dirname = os.path.join(methcallDir, '**', 'rnn.pred.detail.fast5.*')
fast5_flist = glob.glob(dirname, recursive=True)
logger.info(f'Total deepmod fast5 files:{len(fast5_flist)}')
dflist = []
with Pool(processes=args.processors) as pool:
for fn in fast5_flist[:]:
# df = process_pred_detail_f5file(fn, f5_readid_map)
dflist.append(pool.apply_async(process_pred_detail_f5file, (fn, f5_readid_map,)))
# logger.debug(df)
# logger.debug(df.iloc[1, :])
# logger.debug(fn)
# pass
pool.close()
pool.join()
dflist = [df.get() for df in dflist]
sumdf = pd.concat(dflist)
logger.debug('Finish get df from deepmod fast5 predetail files')
cpgDict = defaultdict(lambda: [0, 0]) # 0:cov, 1:meth-cov
for index, row in sumdf.iterrows():
chr = row['chr']
start = row['start']
strand = row['strand']
basekey = (chr, start, strand)
cpgDict[basekey][0] += 1
if row['pred'] == 1:
cpgDict[basekey][1] += 1
logger.debug(f'CpG sites={len(cpgDict)}')
dataset = []
for site in cpgDict:
ret = {'chr': site[0], 'start': site[1], 'end': site[1] + 1, 'base': 'C', 'cap-cov': cpgDict[site][0],
'strand': site[2], 'no-use1': '', 'start1': site[1], 'end1': site[1] + 1, 'no-use2': '0,0,0',
'cov': cpgDict[site][0], 'meth-freq': int(100 * cpgDict[site][1] / cpgDict[site][0]),
'meth-cov': cpgDict[site][1]}
dataset.append(ret)
beddf = pd.DataFrame(dataset)
beddf = beddf[
['chr', 'start', 'end', 'base', 'cap-cov', 'strand', 'no-use1', 'start1', 'end1', 'no-use2', 'cov', 'meth-freq',
'meth-cov']]
logger.debug('Finish bed df, extract all DONE.')
return sumdf, beddf
def parse_arguments():
"""
:return:
"""
parser = argparse.ArgumentParser(description='Multi-task')
parser.add_argument("cmd", help="name of command: compute, combine, or gen-pixel-info")
parser.add_argument('-n', type=int, help="the total number of tasks (1-27)", default=1)
parser.add_argument('-t', type=int, help="the current task id (1-N)", default=1)
parser.add_argument('-i', type=str, help="input file", default=None)
parser.add_argument('-o', type=str, help="output dir or file", default=pic_base_dir)
parser.add_argument('--o2', type=str, help="second output dir or file", default=None)
parser.add_argument('--ibam', type=str, help="input bam/sam file", default=None)
parser.add_argument('--basecallDir', type=str, help="basecallDir dir name", default=None)
parser.add_argument('--methcallDir', type=str, help="methcallDir dir name", default=None)
parser.add_argument('--processors', type=int, help="Number of processors", default=8)
parser.add_argument('--mpi', action='store_true')
parser.add_argument('--chrs', nargs='+', help='all chrs need to check', default=[])
return parser.parse_args()
def output_bed_by_bin(bin_id):
num_bins = 5
density_col = 4
output_cols = [0, 1, 2]
bin_value = int(bin_id / num_bins * 100 + 1e-5)
logger.info(f"start with bin_id={bin_id}, bin_value={bin_value}")
ndf = df[df[density_col] == bin_value]
ndf = ndf.iloc[:, output_cols]
logger.info(f"start to save, df={len(df):,}, ndf={len(ndf):,}, for bin_value={bin_value}")
outfn = os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz")
ndf.to_csv(outfn, sep='\t', header=False, index=False)
logger.info(f"save to {outfn}")
def output_bed_by_bin2(infn, num_bins):
inf = gzip.open(infn, 'rt')
outf_list = []
for bin_id in range(0, num_bins + 1):
bin_value = int(bin_id / num_bins * 100 + 1e-5)
outf_list.append(gzip.open(os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz"), 'wt'))
for row in tqdm(inf):
tmp = row.strip().split("\t")
density_col = 4
bin_value = int(float(tmp[density_col]) + 1e-5)
bin_id = bin_value // 20
if bin_id not in range(0, num_bins + 1):
logger.error(f"Error found: bin_value={bin_value}, bin_id={bin_id}, for row={row}")
raise Exception(f"Error found: bin_value={bin_value}, bin_id={bin_id}, for row={row}")
outf_list[bin_id].write(f"{tmp[0]}\t{tmp[1]}\t{tmp[2]}\n")
    for outf in outf_list:
        outf.close()
logger.info("Finished bin bed for gc density")
def save_tss_bed_for_5hmc(infn, outfn):
logger.info(f"open infn={infn}")
df = pd.read_csv(infn, sep='\t', header=None)
logger.debug(df)
df = df.iloc[:, [0, 1, 2, 4, 7]]
df.columns = ['chr', 'start', 'end', '5hmc_level', 'strand']
df['n1'] = '.'
df['start'] = df['start'].astype(int) - 1
df['end'] = df['end'].astype(int) - 1
df['5hmc_level'] = df['5hmc_level'].astype(float)
df = df[['chr', 'start', 'end', '5hmc_level', 'n1', 'strand']]
logger.info(f"df['5hmc_level'] = {df['5hmc_level'].describe()}")
logger.info(f"len(df['5hmc_level'] >= 1.0) = {(df.loc[:, '5hmc_level'] >= 1.0 - 1e-3).sum()}")
df.to_csv(outfn, sep='\t', header=False, index=False)
logger.info(f"save to {outfn}")
pass
if __name__ == '__main__':
set_log_debug_level()
args = parse_arguments()
logger.debug(args)
ref_fasta = None
if args.cmd in ['tombo-add-seq', 'deepmod-add-seq', 'deepmod-read-level', 'sanity-check-seq',
'bismark-convert']: # These command will use reference genome
ref_fn = '/projects/li-lab/Ziwei/Nanopore/data/reference/hg38.fa'
ref_fasta = SeqIO.to_dict(SeqIO.parse(open(ref_fn), 'fasta'))
if args.cmd == 'tombo-add-seq':
if args.mpi:
logger.debug('in mpi mode')
import multiprocessing
logger.debug(
"There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count())
df = load_tombo_df(infn=args.i)
filter_noncg_sites_mpi(df)
else:
filter_noncg_sites_for_tombo(ntask=args.n, ttask=args.t)
elif args.cmd == 'deepmod-add-seq':
if args.mpi:
logger.debug('in mpi mode')
import multiprocessing
logger.debug(
"There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count())
df = load_deepmod_df(infn=args.i)
filter_noncg_sites_mpi(df, toolname='deepmod')
else:
filter_noncg_sites_for_deepmod(ntask=args.n, ttask=args.t)
elif args.cmd == 'nanopolish-add-strand':
add_strand_info_for_nanopolish()
elif args.cmd == 'sanity-check-seq':
## bash meth_stats_tool.sh sanity-check-seq --chrs chr4:10164 chr4:10298
for chrstr in args.chrs:
# logger.info(chrstr)
sanity_check_get_dna_seq(chrstr)
elif args.cmd == 'deepmod-read-level':
### Running bash:
"""
sbatch meth_stats_tool_mpi.sh deepmod-read-level --basecallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall --methcallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall -o /fastscratch/liuya/nanocompare/deepmod-read-level1.tsv --o2 /fastscratch/liuya/nanocompare/deepmod-read-level1-extract-output.bed
"""
sumdf, beddf = extract_deepmod_read_level_results_mp(basecallDir=args.basecallDir, methcallDir=args.methcallDir)
logger.info(sumdf)
logger.info(sumdf.iloc[1, :])
logger.info(sumdf['chr'].unique())
# outfn = os.path.join('/fastscratch/liuya/nanocompare/', 'deepmod-read-level.tsv')
# Save read level results
outfn = args.o
sumdf.to_csv(outfn, sep='\t', index=False, header=False)
logger.info(f'save to {outfn}')
if args.o2: # Save CpG base level results bed file for cluster module use
outfn = args.o2
beddf.to_csv(outfn, sep=' ', index=False, header=False)
logger.info(f'save to {outfn}')
elif args.cmd == 'bismark-convert': # Convert non-strand info bismark to strand
## bash meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed
## sbatch meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed
df = pd.read_csv(args.i, sep='\t', header=None)
if len(df.columns) != 6:
raise Exception(f"Can no recognize input file format for infn={args.i}, df={df}")
df.columns = ['chr', 'start', 'end', 'freq100', 'mcount', 'ccount']
logger.debug(df)
convert_bismark_cov_to_gw_format(df)
elif args.cmd == 'gc-density-bed':
# sbatch meth_stats_tool.sh gc-density-bed
infn = "/projects/li-lab/yang/workspace/nano-compare/data/genome-annotation/hg38.gc5Base.bed.gz"
output_bed_by_bin2(infn, num_bins=5)
if True:
sys.exit(0)
df = pd.read_csv(infn, sep='\t', header=None)
df.iloc[:, 4] = df.iloc[:, 4].astype(int)
logger.debug(df)
bin_list = list(range(1, 6))
os.makedirs(args.o, exist_ok=True)
with Pool(processes=args.processors) as pool:
pool.map(output_bed_by_bin, bin_list)
elif args.cmd == 'repetitive-bed':
# sbatch meth_stats_tool.sh repetitive-bed
# bash meth_stats_tool.sh repetitive-bed
infn = "/projects/li-lab/yang/results/2021-07-01/hg38.repetitive.bed.gz"
df = | pd.read_csv(infn, sep='\t') | pandas.read_csv |
from functools import reduce
import numpy as np
import pandas as pd
import pyprind
from .enums import *
class Backtest:
"""Backtest runner class."""
def __init__(self, allocation, initial_capital=1_000_000, shares_per_contract=100):
assets = ('stocks', 'options', 'cash')
total_allocation = sum(allocation.get(a, 0.0) for a in assets)
self.allocation = {}
for asset in assets:
self.allocation[asset] = allocation.get(asset, 0.0) / total_allocation
self.initial_capital = initial_capital
self.stop_if_broke = True
self.shares_per_contract = shares_per_contract
self._stocks = []
self._options_strategy = None
self._stocks_data = None
self._options_data = None
@property
def stocks(self):
return self._stocks
@stocks.setter
def stocks(self, stocks):
assert np.isclose(sum(stock.percentage for stock in stocks), 1.0,
atol=0.000001), 'Stock percentages must sum to 1.0'
self._stocks = list(stocks)
return self
@property
def options_strategy(self):
return self._options_strategy
@options_strategy.setter
def options_strategy(self, strat):
self._options_strategy = strat
@property
def stocks_data(self):
return self._stocks_data
@stocks_data.setter
def stocks_data(self, data):
self._stocks_schema = data.schema
self._stocks_data = data
@property
def options_data(self):
return self._options_data
@options_data.setter
def options_data(self, data):
self._options_schema = data.schema
self._options_data = data
def run(self, rebalance_freq=0, monthly=False, sma_days=None):
"""Runs the backtest and returns a `pd.DataFrame` of the orders executed (`self.trade_log`)
Args:
rebalance_freq (int, optional): Determines the frequency of portfolio rebalances. Defaults to 0.
monthly (bool, optional): Iterates through data monthly rather than daily. Defaults to False.
Returns:
pd.DataFrame: Log of the trades executed.
"""
assert self._stocks_data, 'Stock data not set'
assert all(stock.symbol in self._stocks_data['symbol'].values
for stock in self._stocks), 'Ensure all stocks in portfolio are present in the data'
assert self._options_data, 'Options data not set'
assert self._options_strategy, 'Options Strategy not set'
assert self._options_data.schema == self._options_strategy.schema
option_dates = self._options_data['date'].unique()
stock_dates = self.stocks_data['date'].unique()
assert np.array_equal(stock_dates,
option_dates), 'Stock and options dates do not match (check that TZ are equal)'
self._initialize_inventories()
self.current_cash = self.initial_capital
self.trade_log = pd.DataFrame()
self.balance = pd.DataFrame({
'total capital': self.current_cash,
'cash': self.current_cash
},
index=[self.stocks_data.start_date - pd.Timedelta(1, unit='day')])
if sma_days:
self.stocks_data.sma(sma_days)
dates = pd.DataFrame(self.options_data._data[['quotedate',
'volume']]).drop_duplicates('quotedate').set_index('quotedate')
rebalancing_days = pd.to_datetime(
dates.groupby(pd.Grouper(freq=str(rebalance_freq) +
'BMS')).apply(lambda x: x.index.min()).values) if rebalance_freq else []
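        # 'BMS' is pandas' business-month-start frequency, so e.g. rebalance_freq=2 picks the
        # first quote date of every second business month as a rebalancing day.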
data_iterator = self._data_iterator(monthly)
bar = pyprind.ProgBar(len(stock_dates), bar_char='█')
for date, stocks, options in data_iterator:
if (date in rebalancing_days):
previous_rb_date = rebalancing_days[rebalancing_days.get_loc(date) -
1] if rebalancing_days.get_loc(date) != 0 else date
self._update_balance(previous_rb_date, date)
self._rebalance_portfolio(date, stocks, options, sma_days)
bar.update()
# Update balance for the period between the last rebalancing day and the last day
self._update_balance(rebalancing_days[-1], self.stocks_data.end_date)
self.balance['options capital'] = self.balance['calls capital'] + self.balance['puts capital']
self.balance['stocks capital'] = sum(self.balance[stock.symbol] for stock in self._stocks)
self.balance['stocks capital'].iloc[0] = 0
self.balance['options capital'].iloc[0] = 0
self.balance[
'total capital'] = self.balance['cash'] + self.balance['stocks capital'] + self.balance['options capital']
self.balance['% change'] = self.balance['total capital'].pct_change()
self.balance['accumulated return'] = (1.0 + self.balance['% change']).cumprod()
return self.trade_log
def _initialize_inventories(self):
"""Initialize empty stocks and options inventories."""
columns = pd.MultiIndex.from_product(
[[l.name for l in self._options_strategy.legs],
['contract', 'underlying', 'expiration', 'type', 'strike', 'cost', 'order']])
totals = pd.MultiIndex.from_product([['totals'], ['cost', 'qty', 'date']])
self._options_inventory = pd.DataFrame(columns=columns.append(totals))
self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
def _data_iterator(self, monthly):
"""Returns combined iterator for stock and options data.
Each step, it produces a tuple like the following:
(date, stocks, options)
Returns:
generator: Daily/monthly iterator over `self._stocks_data` and `self.options_data`.
"""
if monthly:
it = zip(self._stocks_data.iter_months(), self._options_data.iter_months())
else:
it = zip(self._stocks_data.iter_dates(), self._options_data.iter_dates())
return ((date, stocks, options) for (date, stocks), (_, options) in it)
def _rebalance_portfolio(self, date, stocks, options, sma_days):
"""Reabalances the portfolio according to `self.allocation` weights.
Args:
date (pd.Timestamp): Current date.
stocks (pd.DataFrame): Stocks data for the current date.
options (pd.DataFrame): Options data for the current date.
sma_days (int): SMA window size
"""
self._execute_option_exits(date, options)
stock_capital = self._current_stock_capital(stocks)
options_capital = self._current_options_capital(options)
total_capital = self.current_cash + stock_capital + options_capital
# buy stocks
stocks_allocation = self.allocation['stocks'] * total_capital
self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
# We simulate a sell of the stock positions and then a rebuy.
# This would **not** work if we added transaction fees.
self.current_cash = stocks_allocation + total_capital * self.allocation['cash']
self._buy_stocks(stocks, stocks_allocation, sma_days)
# exit/enter contracts
options_allocation = self.allocation['options'] * total_capital
if options_allocation >= options_capital:
self._execute_option_entries(date, options, options_allocation - options_capital)
else:
to_sell = options_capital - options_allocation
current_options = self._get_current_option_quotes(options)
self._sell_some_options(date, to_sell, current_options)
def _sell_some_options(self, date, to_sell, current_options):
sold = 0
total_costs = sum([current_options[i]['cost'] for i in range(len(current_options))])
for (exit_cost, (row_index, inventory_row)) in zip(total_costs, self._options_inventory.iterrows()):
if (to_sell - sold > -exit_cost) and (to_sell - sold) > 0:
qty_to_sell = (to_sell - sold) // exit_cost
if -qty_to_sell <= inventory_row['totals']['qty']:
qty_to_sell = (to_sell - sold) // exit_cost
else:
if qty_to_sell != 0:
qty_to_sell = -inventory_row['totals']['qty']
if qty_to_sell != 0:
trade_log_append = self._options_inventory.loc[row_index].copy()
trade_log_append['totals', 'qty'] = -qty_to_sell
trade_log_append['totals', 'date'] = date
trade_log_append['totals', 'cost'] = exit_cost
for i, leg in enumerate(self._options_strategy.legs):
trade_log_append[leg.name, 'order'] = ~trade_log_append[leg.name, 'order']
trade_log_append[leg.name, 'cost'] = current_options[i].loc[row_index]['cost']
self.trade_log = self.trade_log.append(trade_log_append, ignore_index=True)
self._options_inventory.at[row_index, ('totals', 'date')] = date
self._options_inventory.at[row_index, ('totals', 'qty')] += qty_to_sell
sold += (qty_to_sell * exit_cost)
self.current_cash += sold - to_sell
def _current_stock_capital(self, stocks):
"""Return the current value of the stocks inventory.
Args:
stocks (pd.DataFrame): Stocks data for the current time step.
Returns:
float: Total capital in stocks.
"""
current_stocks = self._stocks_inventory.merge(stocks,
how='left',
left_on='symbol',
right_on=self._stocks_schema['symbol'])
return (current_stocks[self._stocks_schema['adjClose']] * current_stocks['qty']).sum()
def _current_options_capital(self, options):
options_value = self._get_current_option_quotes(options)
values_by_row = [0] * len(options_value[0])
if len(options_value[0]) != 0:
for i in range(len(self._options_strategy.legs)):
values_by_row += options_value[i]['cost'].values
total = -sum(values_by_row * self._options_inventory['totals']['qty'].values)
else:
total = 0
return total
def _buy_stocks(self, stocks, allocation, sma_days):
"""Buys stocks according to their given weight, optionally using an SMA entry filter.
Updates `self._stocks_inventory` and `self.current_cash`.
Args:
stocks (pd.DataFrame): Stocks data for the current time step.
allocation (float): Total capital allocation for stocks.
sma_days (int): SMA window.
"""
stock_symbols = [stock.symbol for stock in self.stocks]
query = '{} in {}'.format(self._stocks_schema['symbol'], stock_symbols)
inventory_stocks = stocks.query(query)
stock_percentages = np.array([stock.percentage for stock in self.stocks])
stock_prices = inventory_stocks[self._stocks_schema['adjClose']]
if sma_days:
qty = np.where(inventory_stocks['sma'] < stock_prices, (allocation * stock_percentages) // stock_prices, 0)
else:
qty = (allocation * stock_percentages) // stock_prices
self.current_cash -= np.sum(stock_prices * qty)
self._stocks_inventory = | pd.DataFrame({'symbol': stock_symbols, 'price': stock_prices, 'qty': qty}) | pandas.DataFrame |
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
import requests
import json
from django.views.decorators.csrf import csrf_exempt
# from rest_framework.decorators import api_view,renderer_classes,
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from rest_framework.decorators import api_view,schema
import os,subprocess,pathlib
from string import ascii_uppercase
from random import choice
import pandas as pd
import skimage
from multiprocessing import Lock, Process
lockForModelLoad = None
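# Module-level lock, created lazily via create_lockForModel() below; presumably it serialises
# model load/unload across worker processes (assumption - this is not documented in the file).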
def create_lockForModel():
global lockForModelLoad
lockForModelLoad = Lock()
# from SwaggerSchema.schemas import (loadModelSwagger,
# predictTestDataSwagger,
# unloadModelSwagger,
# )
from trainModel import kerasUtilities
from trainModel.mergeTrainingV2 import PMMLMODELSTORAGE
from trainModel.mergeTrainingV2 import NewModelOperations
kerasUtilities = kerasUtilities.KerasUtilities()
global PMMLMODELSTORAGE
class Scoring:
def getListOfModelinMemory():
global PMMLMODELSTORAGE
# print ('PMMLMODELSTORAGE',PMMLMODELSTORAGE)
moreDetails=[]
for j in PMMLMODELSTORAGE:
temp_dict={}
temp_dict['modelName']=j
try:
temp_dict['inputShape']=PMMLMODELSTORAGE[j]['inputShape']
except:
pass
# temp_dict['predClasses']=[str(cl) for cl in PMMLMODELSTORAGE[j]['predClasses']]
try:
temp_dict['status']=PMMLMODELSTORAGE[j]['status']
except:
pass
moreDetails.append(temp_dict)
# print ('>>>',temp_dict)
return JsonResponse(moreDetails, safe=False,status=200)
def loadModelfile(self,filpath, idforData=None):
# print ('>>>>>',filpath)
global PMMLMODELSTORAGE
# filpath=requests.POST.get('filePath')
# print ('>>>>>>> filepath',filpath)
# filpath=filpath.replace('.pmml','')
keyOfGlobalMemory,messNotice,modelType=kerasUtilities.loadPMMLmodel(filpath,idforData)
# print ('>>>>>>> messnotice',messNotice)
if messNotice=='Success':
# modelDetails={j:PMMLMODELSTORAGE[j] for j in PMMLMODELSTORAGE}
# print ('>>>>>>>>>>>>>>>>>>>>>',PMMLMODELSTORAGE )
# modelDetails={'inputShape':modelDetails[keyOfGlobalMemory]['inputShape'],
# 'predClasses':modelDetails[keyOfGlobalMemory]['predClasses'],
# 'status':modelDetails[keyOfGlobalMemory]['status'],}
data_details={'message':'Model loaded successfully','keytoModel':keyOfGlobalMemory}#,'modelDetails':modelDetails}
statusCode = 200
# print('PMMLMODELSTORAGE',PMMLMODELSTORAGE)
# return JsonResponse(data_details)
# elif (messNotice=='Success') & (modelType=='sklearnM'):
# # print ('>>>>>')
# # modelDetails={j:PMMLMODELSTORAGE[j] for j in PMMLMODELSTORAGE}
# data_details={'message':'Model loaded successfully','keytoModel':keyOfGlobalMemory}
elif messNotice=='Failure':
data_details={'message':'Model loading failed, please contact Admin','keytoModel':None}
statusCode = 500
return JsonResponse(data_details,status= statusCode)
def removeModelfromMemory(self,modelName):
# print('>>>>>>>>>>>>>>>>came here')
global PMMLMODELSTORAGE
# modelname=param
modelName=modelName.replace('.pmml','')
# print('modelname ',modelname)
try:
messNotice=kerasUtilities.deleteLoadedModelfromMemory(modelName)
data_details={'message':'Model unloaded successfully, now it will not be available for predictions.'}
statusCode = 200
except:
data_details={'message':'Not able to locate, make sure the model was loaded'}
statusCode = 500
print(data_details)
return JsonResponse(data_details,status= statusCode)
def predicttestdata(self,filpath,modelName,jsonData=None):
# print('Came Step 1')
def checkValInPMMLSTO(pmmlstorage,valtoCheck):
try:
val=pmmlstorage[valtoCheck]
except:
val=None
return val
def checkExtensionOfFile(fP):
return pathlib.Path(fP).suffix
global PMMLMODELSTORAGE
pmmlstoragepointer=modelName
# print ('>>>>',pmmlstoragepointer)
# print('.,.,.,.',PMMLMODELSTORAGE)
# print('filepath>>>>>>>>>>>>>>>',filpath)
pmmlstoragepointer=pmmlstoragepointer.replace('.pmml','')
pmmlObj=PMMLMODELSTORAGE[pmmlstoragepointer]
modelType=checkValInPMMLSTO(pmmlObj,'modelType')
preProcessScript=checkValInPMMLSTO(pmmlObj,'preProcessScript')
postProcessScript=checkValInPMMLSTO(pmmlObj,'postProcessScript')
scriptOutput=checkValInPMMLSTO(pmmlObj,'scriptOutput')
# print('Came Step 2',modelType,scriptOutput)
# print ('preProcessScript',preProcessScript,'postProcessScript',postProcessScript)
if filpath and (modelType != 'MRCNN'):
            print ('Came here in Image classification')
extenFile=checkExtensionOfFile(filpath)
PMMLMODELSTORAGE[pmmlstoragepointer]['extenFile']=extenFile
import pandas as pd
if (preProcessScript == None) & (postProcessScript == None):
if extenFile in ['.jpg','.JPG','.png','.PNG']:
outputModel=kerasUtilities.predictImagedata(pmmlstoragepointer,filpath)
resulFile=outputModel
elif os.path.isdir(filpath):
numFiles=os.listdir(filpath+'/test')
if len(numFiles) > 100:
tempRunMemory=kerasUtilities.predictFolderDataInBatch(pmmlstoragepointer,filpath,len(numFiles))
tempRunMemory['inTask']=True
return JsonResponse(tempRunMemory,status=200)
else:
resulFile=kerasUtilities.predictFolderdata(pmmlstoragepointer,filpath)
elif extenFile in ['.json']:
data=json.load(open(filpath,'r'))
testData=pd.DataFrame([data])
resulFile=kerasUtilities.predictFiledata(pmmlstoragepointer,testData,modelType)
else:
testData=pd.read_csv(filpath)
resulFile=kerasUtilities.predictFiledata(pmmlstoragepointer,testData,modelType)
elif (preProcessScript != None) & (postProcessScript == None):
if scriptOutput in ['IMAGE','DATA']:
if modelType=='kerasM':
# print ('>>>>>>>>>>>>>>>>',scriptOutput)
resulFile=kerasUtilities.predictCustomCodedata(pmmlstoragepointer,filpath,scriptOutput)
if resulFile.__class__.__name__ == 'dict':
resulFile['inTask']=True
return JsonResponse(resulFile,status=200)
else:
pass
elif (preProcessScript != None) & (postProcessScript != None):
# print('Came Step 3')
if scriptOutput in ['IMAGE','DATA']:
if modelType=='kerasM':
# print ('>>>>>>>>>>>>>>>>>',scriptOutput)
resulFile=kerasUtilities.predictDataWithPostScript(pmmlstoragepointer,filpath,scriptOutput)
if resulFile.__class__.__name__ == 'dict':
resulFile['inTask']=True
return JsonResponse(resulFile,status=200)
elif (preProcessScript == None) & (postProcessScript != None):
# print('Came Step 4')
# if scriptOutput in ['IMAGE','DATA']:
if modelType=='kerasM':
print ('>>>>>>>>>>>>>>>>>',scriptOutput)
resulFile=kerasUtilities.predictDataWithOnlyPostScript(pmmlstoragepointer,filpath,extenFile)
elif filpath and (modelType == 'MRCNN'):
# print ('Came to MRCNN model >>>>>>')
extenFile=checkExtensionOfFile(filpath)
if extenFile in ['.jpg','.JPG','.png','.PNG']:
resulFile=kerasUtilities.detectObject(filpath, modelName)
else:
import pandas as pd
testData=pd.DataFrame([jsonData])
PMMLMODELSTORAGE[pmmlstoragepointer]['extenFile']='.json'
resulFile=kerasUtilities.predictFiledata(pmmlstoragepointer,testData,modelType)
data_details={'result':resulFile}
return JsonResponse(data_details,status=202)
def predicttestdataReturnJson(self,filpath,modelName,jsonData=None):
# print('Came Step 1')
def checkValInPMMLSTO(pmmlstorage,valtoCheck):
try:
val=pmmlstorage[valtoCheck]
except:
val=None
return val
def checkExtensionOfFile(fP):
return pathlib.Path(fP).suffix
global PMMLMODELSTORAGE
pmmlstoragepointer=modelName
# print ('>>>>',pmmlstoragepointer)
# print('.,.,.,.',PMMLMODELSTORAGE)
# print('filepath>>>>>>>>>>>>>>>',filpath)
pmmlstoragepointer=pmmlstoragepointer.replace('.pmml','')
pmmlObj=PMMLMODELSTORAGE[pmmlstoragepointer]
modelType=checkValInPMMLSTO(pmmlObj,'modelType')
preProcessScript=checkValInPMMLSTO(pmmlObj,'preProcessScript')
postProcessScript=checkValInPMMLSTO(pmmlObj,'postProcessScript')
scriptOutput=checkValInPMMLSTO(pmmlObj,'scriptOutput')
# print('Came Step 2',modelType,scriptOutput)
# print ('preProcessScript',preProcessScript,'postProcessScript',postProcessScript)
if filpath and (modelType != 'MRCNN'):
extenFile=checkExtensionOfFile(filpath)
PMMLMODELSTORAGE[pmmlstoragepointer]['extenFile']=extenFile
import pandas as pd
if (preProcessScript == None) & (postProcessScript == None):
if extenFile in ['.jpg','.JPG','.png','.PNG']:
outputModel=kerasUtilities.predictImagedata(pmmlstoragepointer,filpath)
resulFile=outputModel
elif os.path.isdir(filpath):
numFiles=os.listdir(filpath+'/test')
if len(numFiles) > 100:
tempRunMemory=kerasUtilities.predictFolderDataInBatch(pmmlstoragepointer,filpath,len(numFiles))
tempRunMemory['inTask']=True
return JsonResponse(tempRunMemory,status=200)
else:
resulFile=kerasUtilities.predictFolderdata(pmmlstoragepointer,filpath)
elif extenFile in ['.json']:
data=json.load(open(filpath,'r'))
testData=pd.DataFrame([data])
resulFile=kerasUtilities.predictFiledata(pmmlstoragepointer,testData,modelType)
else:
testData=pd.read_csv(filpath)
resulFile=kerasUtilities.predictFiledata(pmmlstoragepointer,testData,modelType)
elif (preProcessScript != None) & (postProcessScript == None):
if scriptOutput in ['IMAGE','DATA']:
if modelType=='kerasM':
# print ('>>>>>>>>>>>>>>>>',scriptOutput)
resulFile=kerasUtilities.predictCustomCodedata(pmmlstoragepointer,filpath,scriptOutput)
if resulFile.__class__.__name__ == 'dict':
resulFile['inTask']=True
return JsonResponse(resulFile,status=200)
else:
pass
elif (preProcessScript != None) & (postProcessScript != None):
# print('Came Step 3')
if scriptOutput in ['IMAGE','DATA']:
if modelType=='kerasM':
# print ('>>>>>>>>>>>>>>>>>',scriptOutput)
resulFile=kerasUtilities.predictDataWithPostScript(pmmlstoragepointer,filpath,scriptOutput)
if resulFile.__class__.__name__ == 'dict':
resulFile['inTask']=True
return JsonResponse(resulFile,status=200)
elif (preProcessScript == None) & (postProcessScript != None):
# print('Came Step 4')
# if scriptOutput in ['IMAGE','DATA']:
if modelType=='kerasM':
print ('>>>>>>>>>>>>>>>>>',scriptOutput)
resulFile=kerasUtilities.predictDataWithOnlyPostScript(pmmlstoragepointer,filpath,extenFile)
elif filpath and (modelType == 'MRCNN'):
# print ('Came to MRCNN model >>>>>>')
extenFile=checkExtensionOfFile(filpath)
if extenFile in ['.jpg','.JPG','.png','.PNG']:
resulFile=kerasUtilities.detectObject(filpath, modelName)
else:
import pandas as pd
testData= | pd.DataFrame([jsonData]) | pandas.DataFrame |
'''
<NAME>
https://www.quantopian.com/posts/turtle-trading-strategy#:~:text=Turtle%20trading%20is%20a%20well,of%20rules%20is%20more%20intricate.&text=This%20is%20a%20pretty%20fundamental%20strategy%20and%20it%20seems%20to%20work%20well.
https://bigpicture.typepad.com/comments/files/turtlerules.pdf
https://github.com/myquant/strategy/blob/master/Turtle/info.md
https://zhuanlan.zhihu.com/p/161882477
Trend following:
entry: price breaks above the 20-day high
add: one more unit for every 0.5 ATR the price moves up, up to 3 add-ons
stop loss: exit if price falls more than 2 ATR below the entry
exit: price breaks below the 10-day low
It invests in units: one price unit is one ATR; one size unit is 1% of the asset value / ATR.
'''
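# Illustrative unit sizing (assumed numbers, not taken from this script): with a portfolio
# value of 100,000 and ATR = 250, one unit is int(100_000 * 0.01 / 250) = 4 shares/contracts;
# a further unit is added each time price rises another 0.5 * ATR = 125 above the entry,
# up to 3 add-ons, and the protective stop sits 2 * ATR = 500 below the entry price.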
import os
import numpy as np
import pandas as pd
from datetime import datetime
import backtrader as bt
from IPython.core.display import display, HTML
# set browser full width
display(HTML("<style>.container { width:100% !important; }</style>"))
class Turtle(bt.Strategy):
params = (
('long_window', 20),
('short_window', 10),
('printlog', False), # comma is required
)
def __init__(self):
self.order = None
self.buyprice = 0.0
self.buycomm = 0.0
self.bar_executed = 0
self.val_start = 0.0
self.buy_count = 0
self.don_high = bt.indicators.Highest(self.data.high(-1), period=self.params.long_window)
self.don_low = bt.indicators.Lowest(self.data.low(-1), period=self.params.short_window)
# https://en.wikipedia.org/wiki/Average_true_range
self.TR = bt.indicators.Max((self.data.high(0) - self.data.low(0)), \
abs(self.data.close(-1) - self.data.high(0)), \
abs(self.data.close(-1) - self.data.low(0)))
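        # TR above is the classic true range: the largest of (high - low), |prev close - high|
        # and |prev close - low|; ATR below smooths it with a 14-bar simple moving average.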
self.ATR = bt.indicators.SimpleMovingAverage(self.TR, period=14)
self.buy_signal = bt.ind.CrossOver(self.data.close(0), self.don_high)
self.sell_signal = bt.ind.CrossOver(self.data.close(0), self.don_low)
def log(self, txt, dt=None, doprint=False):
        ''' Logging function for this strategy'''
if self.params.printlog or doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def start(self):
self.val_start = self.broker.get_cash() # keep the starting cash
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' % (trade.pnl, trade.pnlcomm))
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]: # order.Partial
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Size: %.0f, Cost: %.2f, Comm %.2f, RemSize: %.0f, RemCash: %.2f' %
(order.executed.price,
order.executed.size,
order.executed.value,
order.executed.comm,
order.executed.remsize,
self.broker.get_cash()))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Size: %.0f, Cost: %.2f, Comm %.2f, RemSize: %.0f, RemCash: %.2f' %
(order.executed.price,
order.executed.size,
order.executed.value,
order.executed.comm,
order.executed.remsize,
self.broker.get_cash()))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Expired, order.Margin, order.Rejected]:
self.log('Order Failed')
self.order = None
def next(self):
# Simply log the closing price of the series from the reference
# self.log('Close, %.2f' % self.data.close[0])
if self.order:
return
# Long
if self.buy_signal > 0 and self.buy_count == 0:
# one unit is 1% of total risk asset
target_size = int(self.broker.getvalue() * 0.01 / self.ATR[0])
self.order = self.order_target_size(target=target_size)
self.log(f'LONG ORDER SENT, price: {self.data.close[0]:.2f}, don_high: {self.don_high[0]:.2f}')
self.buy_count = 1
# add; This is for futures; may go beyond notional; leverage is set to 4
elif self.data.close > self.buyprice + 0.5 * self.ATR[0] and self.buy_count > 0 and self.buy_count <= 3:
target_size = int(self.broker.getvalue() * 0.01 / self.ATR[0])
target_size += self.getposition(self.datas[0]).size # on top of current size
self.order = self.order_target_size(target=target_size)
self.log(f'ADD LONG ORDER SENT, add time: {self.buy_count}, price: {self.data.close[0]:.2f}, don_high: {self.don_high[0]:.2f}')
self.buy_count += 1
# flat
elif self.sell_signal < 0 and self.buy_count > 0:
self.order = self.order_target_size(target=0)
self.log(f'FLAT ORDER SENT, price: {self.data.close[0]:.2f}, don_low: {self.don_low[0]:.2f}')
self.buy_count = 0
# flat, stop loss
elif self.data.close < (self.buyprice - 2 * self.ATR[0]) and self.buy_count > 0:
self.order = self.order_target_size(target=0)
self.log(f'FLAT ORDER SENT, price: {self.data.close[0]:.2f}, {self.buyprice:.2f}, 2ATR: {2 * self.ATR[0]:.2f}')
self.buy_count = 0
def stop(self):
# calculate the actual returns
print(self.analyzers)
roi = (self.broker.get_value() / self.val_start) - 1.0
self.log('ROI: {:.2f}%'.format(100.0 * roi))
        self.log('Turtle Ending Value %.2f' %
self.broker.getvalue(), doprint=True)
if __name__ == '__main__':
param_opt = False
perf_eval = True
benchmark = 'SPX'
cerebro = bt.Cerebro()
datapath = os.path.join('../data/', 'SPX.csv')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
fromdate=datetime(2010, 1, 1),
todate=datetime(2019, 12, 31),
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Add a FixedSize sizer according to the stake
# cerebro.addsizer(bt.sizers.FixedSize, stake=10)
# PercentSizer will flat position first; overwrite if not desired.
# cerebro.addsizer(bt.sizers.PercentSizerInt, percents=95)
# Set the commission - 0.1% ... divide by 100 to remove the %
cerebro.broker.setcommission(commission=0.001, leverage=10)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Add a strategy
cerebro.addstrategy(Turtle, printlog=True)
# Add Analyzer
cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='SharpeRatio')
cerebro.addanalyzer(bt.analyzers.DrawDown, _name='DrawDown')
cerebro.addanalyzer(bt.analyzers.PyFolio, _name='pyfolio')
# Run over everything
results = cerebro.run()
# Print out the final result
strat = results[0]
print('Final Portfolio Value: %.2f, Sharpe Ratio: %.2f, DrawDown: %.2f, MoneyDown %.2f' %
(cerebro.broker.getvalue(),
strat.analyzers.SharpeRatio.get_analysis()['sharperatio'],
strat.analyzers.DrawDown.get_analysis()['drawdown'],
strat.analyzers.DrawDown.get_analysis()['moneydown']))
if perf_eval:
import matplotlib.pyplot as plt
cerebro.plot(style='candlestick')
plt.show()
pyfoliozer = strat.analyzers.getbyname('pyfolio')
returns, positions, transactions, gross_lev = pyfoliozer.get_pf_items()
print('-------------- RETURNS ----------------')
print(returns)
print('-------------- POSITIONS ----------------')
print(positions)
print('-------------- TRANSACTIONS ----------------')
print(transactions)
print('-------------- GROSS LEVERAGE ----------------')
print(gross_lev)
import empyrical as ep
import pyfolio as pf
bm_ret = None
if benchmark:
datapath = os.path.join('../data/', f'{benchmark}.csv')
bm = pd.read_csv(datapath, index_col=0)
bm_ret = bm['Adj Close'].pct_change().dropna()
bm_ret.index = | pd.to_datetime(bm_ret.index) | pandas.to_datetime |
__author__ = 'Martin'
import pandas as pd
import numpy as np
from scipy import stats
from sklearn import cross_validation
from sklearn import ensemble
def is_transformer(cls):
return hasattr(cls, '__dageva_type') and cls.__dageva_type == 'transformer'
def is_predictor(cls):
return hasattr(cls, '__dageva_type') and cls.__dageva_type == 'predictor'
def make_transformer(cls):
"""
    Tags the cls class as a transformer (via the `__dageva_type` attribute), useful in order to distinguish between transformers and predictors.
    :param cls: The class to mark as a Transformer
    :return: The same class, tagged as a transformer
"""
cls.__dageva_type = 'transformer'
return cls
def make_predictor(cls):
"""
    Tags the cls class as a predictor (via the `__dageva_type` attribute), useful in order to distinguish between transformers and predictors.
    :param cls: The class to mark as a Predictor
    :return: The same class, tagged as a predictor
"""
cls.__dageva_type = 'predictor'
return cls
class KMeansSplitter:
def __init__(self, k):
from sklearn import cluster
self.kmeans = cluster.KMeans(n_clusters=k)
self.sorted_outputs = None
self.weight_idx = []
def fit(self, x, y, sample_weight=None):
self.kmeans.fit(x, y)
preds = self.kmeans.predict(x)
out = []
for i in range(self.kmeans.n_clusters):
idx = [n for n in range(len(preds)) if preds[n] == i]
self.weight_idx.append(idx)
if isinstance(x, pd.DataFrame):
out.append(x.iloc[idx])
else:
out.append(x[idx])
mins = [len(x.index) for x in out]
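        # Clusters are re-ordered by size (smallest first); transform() later emits its
        # outputs in this same order so downstream consumers see a consistent ordering.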
self.sorted_outputs = list(np.argsort(mins))
self.weight_idx = [self.weight_idx[i] for i in self.sorted_outputs]
return self
def transform(self, x):
preds = self.kmeans.predict(x)
out = []
for i in range(self.kmeans.n_clusters):
idx = [n for n in range(len(preds)) if preds[n] == i]
if isinstance(x, pd.DataFrame):
out.append(x.iloc[idx])
else:
out.append(x[idx])
return [out[i] for i in self.sorted_outputs]
class ConstantModel:
def __init__(self, cls):
self.cls = cls
def fit(self, x, y):
return self
def predict(self, x):
return pd.Series(np.array([self.cls]*len(x)), index=x.index)
class Aggregator:
def aggregate(self, x, y):
pass
class Voter(Aggregator):
def fit(self, x, y):
return self
def union_aggregate(self, x, y):
f_list, t_list = x, y
f_frame, t_frame = pd.DataFrame(), pd.Series()
for i in range(len(t_list)):
fl = f_list[i]
assert isinstance(fl, pd.DataFrame)
if fl.columns.dtype == np.dtype('int64'):
cols = map(lambda z: str(id(fl)) + '_' + str(z), fl.columns)
fl.columns = cols
t_frame = t_frame.append(t_list[i])
f_frame = f_frame.append(f_list[i])
f_frame.sort_index(inplace=True)
t_frame = t_frame.sort_index()
return f_frame, t_frame
def aggregate(self, x, y):
if not all([x[0].index.equals(xi.index) for xi in x]):
return self.union_aggregate(x, y)
res = pd.DataFrame(index=y[0].index)
for i in range(len(y)):
res["p"+str(i)] = y[i]
modes = res.apply(lambda row: stats.mode(row, axis=None)[0][0], axis=1)
if modes.empty:
return x[0], pd.Series()
return x[0], pd.Series(modes, index=y[0].index)
class Workflow:
def __init__(self, dag=None):
self.dag = dag
self.sample_weight = None
self.classes_ = None
def fit(self, X, y, sample_weight=None):
import eval #TODO: Refactor to remove circular imports
self.models = eval.train_dag(self.dag, train_data=(X, y), sample_weight=sample_weight)
self.classes_ = np.unique(y)
return self
def predict(self, X):
import eval #TODO: Refactor to remove circular imports
return np.array(eval.test_dag(self.dag, self.models, test_data=(X, None)))
def transform(self, X):
import eval
return eval.test_dag(self.dag, self.models, test_data=(X, None), output='feats_only')
def get_params(self, deep=False):
return {'dag': self.dag}
def set_params(self, **params):
if 'sample_weight' in params:
self.sample_weight = params['sample_weight']
class Stacker(Aggregator):
def __init__(self, sub_dags=None, initial_dag=None):
self.sub_dags = sub_dags
self.initial_dag = initial_dag
def fit(self, X, y, sample_weight=None):
import eval
preds = [[] for _ in self.sub_dags]
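        # Collect out-of-fold predictions from each sub-dag via 5-fold stratified CV; the
        # concatenated predictions become the training features of the stacking level.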
for train_idx, test_idx in cross_validation.StratifiedKFold(y, n_folds=5):
tr_X, tr_y = X.iloc[train_idx], y.iloc[train_idx]
tst_X, tst_y = X.iloc[test_idx], y.iloc[test_idx]
wf_init = Workflow(self.initial_dag)
wf_init.fit(tr_X, tr_y, sample_weight=sample_weight)
preproc_X, preproc_y = eval.test_dag(self.initial_dag, wf_init.models, test_data=(tr_X, tr_y), output='all')
pp_tst_X = wf_init.transform(tst_X)
if pp_tst_X.empty:
continue
for i, dag in enumerate(self.sub_dags):
wf = Workflow(dag)
wf.fit(preproc_X, preproc_y)
res = wf.predict(pp_tst_X)
preds[i].append(pd.DataFrame(res, index=pp_tst_X.index))
preds = [pd.concat(ps) for ps in preds]
self.train = | pd.concat(preds, axis=1) | pandas.concat |
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import zigzag
from matplotlib.finance import candlestick2_ohlc
fpath = os.path.dirname(os.path.abspath(__file__))
fpath += '/data/ingest_data/'
load_file_name = 'binance_btc_usdt_4h.csv'
write_up_down_file_name = 'action_binance_btc_usdt_4h.csv'
chart_data = pd.read_csv(fpath + load_file_name, thousands=',', header=None)
chart_data.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
chart_data['date'] = pd.to_datetime(chart_data['date'])
chart_data = chart_data[(chart_data['date'] >= '2017-11-01') & (chart_data['date'] <= '2018-02-01')]
high_low = []
trend = 0
open_a = chart_data.open.values
close_a = chart_data.close.values
low_a = chart_data.low.values
high_a = chart_data.high.values
ohlcv4_a = (open_a + close_a + low_a + high_a) / 4
for i in range(len(chart_data.date)):
open = open_a[i]
close = close_a[i]
low = low_a[i]
high = high_a[i]
if i == 0:
high_low.append(high if open < close else low)
continue
high_low.append(max(ohlcv4_a[i], ohlcv4_a[i - 1]))
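# high_low holds, for each bar after the first, the max of its OHLC/4 average and the
# previous bar's; this smoothed series is what the zigzag pivot detector operates on.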
X = np.array(high_low)
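# Roughly, a 2% rise off a low confirms a valley and a 1% drop off a high confirms a peak
# (zigzag's up_thresh / down_thresh arguments).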
pivots = zigzag.peak_valley_pivots(X, 0.02, -0.01)
"""
Upper pivot point: 1
Lower pivot point: -1
Everything else: 0
swsong
An upper peak is labeled -1
A lower trough is labeled 1
If they alternate, the score is 0.
"""
hold_count = 1
left_hold_range = 0.15
right_hold_range = 0.01
# action = 'B', 'S', 'H'
actions = []
last_action = None
last_action_price = None
last_action_index = 0
highest_price = 0
lowest_price = 0
prev_pivot_index = 0
tmp_pivot = None
current_hold_count = hold_count
def get_next_pivot_index(index, hold_count):
next_index = None
for i in range(index + hold_count, len(pivots)):
if pivots[i] != 0:
next_index = i
break
return next_index
for index in range(len(pivots)):
price = close_a[index]
pivot = pivots[index]
act = None
if last_action is None:
        # No position yet at the start, so buy
act = 'B'
tmp_pivot = 1
elif pivot != 0 or current_hold_count > 0:
        # Case where hold bars remain,
        # or the pivot is non-zero.
act = 'H'
current_hold_count -= 1
if pivot != 0:
tmp_pivot = pivot
prev_pivot_index = index
if current_hold_count <= 0:
current_hold_count = hold_count
else:
next_pivot_index = get_next_pivot_index(index, 1)
if next_pivot_index is None:
act = 'H'
else:
print('--------------------------------------------')
print('date: {}'.format(chart_data['date'].values[index]))
total_count = next_pivot_index - prev_pivot_index
current_count = index - prev_pivot_index
act = 'H'
if tmp_pivot == -1:
is_left_hold_action = (total_count - current_count) / total_count < left_hold_range
is_right_hold_action = abs(lowest_price - price) / price < right_hold_range
                print('Buy')
print('left: {}, right: {}'.format(is_left_hold_action, is_right_hold_action))
if is_left_hold_action or is_right_hold_action:
act = 'H'
else:
act = 'B'
if tmp_pivot == 1:
is_left_hold_action = (total_count - current_count) / total_count < left_hold_range
is_right_hold_action = (highest_price - price) / price < right_hold_range
                print('Sell')
print('left: {}, right: {}'.format(is_left_hold_action, is_right_hold_action))
if is_left_hold_action or is_right_hold_action:
act = 'H'
else:
act = 'S'
print('act: {}, trends: {}'.format(act, tmp_pivot))
            print('price: {}, highest: {}, lowest: {}'.format(price, highest_price, lowest_price))
print('--------------------------------------------')
if highest_price < price:
highest_price = price
if lowest_price > price:
lowest_price = price
if act != 'H':
last_action = act
last_action_price = price
last_action_index = index
highest_price = price
lowest_price = price
actions.append(act)
actions = np.array(actions)
fake_data = zip(range(len(actions)), chart_data.date, actions)
act_data = | pd.DataFrame([data for num, *data in fake_data], columns=['date', 'action']) | pandas.DataFrame |
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import aamped
from dask.distributed import Client, LocalCluster
import pytest
import warnings
import naive
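# `naive` provides brute-force reference implementations (e.g. naive.aamp) that the
# distributed aamped() results are compared against in the tests below.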
@pytest.fixture(scope="module")
def dask_cluster():
cluster = LocalCluster(n_workers=2, threads_per_worker=2)
yield cluster
cluster.close()
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
window_size = [8, 16, 32]
def test_aamp_int_input(dask_cluster):
with pytest.raises(TypeError):
with Client(dask_cluster) as dask_client:
aamped(dask_client, np.arange(10), 5)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_aamped_self_join(T_A, T_B, dask_cluster):
with Client(dask_cluster) as dask_client:
m = 3
ref_mp = naive.aamp(T_B, m)
comp_mp = aamped(dask_client, T_B, m)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_aamped_self_join_df(T_A, T_B, dask_cluster):
with Client(dask_cluster) as dask_client:
m = 3
ref_mp = naive.aamp(T_B, m)
comp_mp = aamped(dask_client, pd.Series(T_B), m)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_aamped_self_join_larger_window(T_A, T_B, m, dask_cluster):
with Client(dask_cluster) as dask_client:
if len(T_B) > m:
ref_mp = naive.aamp(T_B, m)
comp_mp = aamped(dask_client, T_B, m)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_aamped_self_join_larger_window_df(T_A, T_B, m, dask_cluster):
with Client(dask_cluster) as dask_client:
if len(T_B) > m:
ref_mp = naive.aamp(T_B, m)
comp_mp = aamped(dask_client, pd.Series(T_B), m)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_aamped_A_B_join(T_A, T_B, dask_cluster):
with Client(dask_cluster) as dask_client:
m = 3
ref_mp = naive.aamp(T_A, m, T_B=T_B)
comp_mp = aamped(dask_client, T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_aamped_A_B_join_df(T_A, T_B, dask_cluster):
with Client(dask_cluster) as dask_client:
m = 3
ref_mp = naive.aamp(T_A, m, T_B=T_B)
comp_mp = aamped(
dask_client, | pd.Series(T_A) | pandas.Series |
import pytest
import tubular.testing.test_data as d
import tubular.testing.helpers as h
import tubular
from tubular.numeric import LogTransformer
import numpy as np
import pandas as pd
import re
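# `d` supplies shared fixture DataFrames (create_df_3/4/5) and `h` provides the
# assertion/argument-checking helpers used throughout these LogTransformer tests.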
class TestInit(object):
"""Tests for LogTransformer.init()."""
def test_arguments(self):
"""Test that init has expected arguments."""
h.test_function_arguments(
func=LogTransformer.__init__,
expected_arguments=["self", "columns", "base", "add_1", "drop", "suffix"],
expected_default_values=(None, False, True, "log"),
)
def test_base_type_error(self):
"""Test that an exception is raised if base is non-numeric."""
with pytest.raises(
ValueError,
match=re.escape("base should be numeric or None"),
):
LogTransformer(
columns=["a"],
base="a",
new_column_name="b",
)
def test_base_not_strictly_positive_error(self):
"""Test that an exception is raised if base is not strictly positive."""
with pytest.raises(
ValueError,
match=re.escape("base should be strictly positive"),
):
LogTransformer(
columns=["a"],
base=0,
new_column_name="b",
)
def test_class_methods(self):
"""Test that LogTransformer has transform method."""
x = LogTransformer(columns="a")
h.test_object_method(obj=x, expected_method="transform", msg="transform")
def test_inheritance(self):
"""Test that LogTransformer inherits from BaseTransformer."""
x = LogTransformer(columns="a")
h.assert_inheritance(x, tubular.base.BaseTransformer)
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {
0: {
"args": (),
"kwargs": {"columns": ["a", "b"], "verbose": True, "copy": True},
}
}
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
LogTransformer(
columns=["a", "b"],
add_1=True,
drop=True,
suffix="_new",
verbose=True,
copy=True,
)
def test_impute_values_set_to_attribute(self):
"""Test that the value passed for impute_value is saved in an attribute of the same name."""
x = LogTransformer(
columns=["a", "b"],
base=1,
add_1=True,
drop=False,
suffix="new",
verbose=True,
copy=True,
)
expected_attributes = {"base": 1, "add_1": True, "drop": False, "suffix": "new"}
h.test_object_attributes(
obj=x,
expected_attributes=expected_attributes,
msg="Attributes for LogTransformer set in init",
)
class TestTransform(object):
"""Tests for LogTransformer.transform()."""
def expected_df_1():
"""Expected output of test_expected_output_1."""
df = d.create_df_3()
df["a_new_col"] = np.log(df["a"])
df["b_new_col"] = np.log(df["b"])
df.drop(columns=["a", "b"], inplace=True)
return df
def expected_df_2():
"""Expected output of test_expected_output_2."""
df = d.create_df_3()
df["a_new_col"] = np.log(df["a"] + 1)
df["b_new_col"] = np.log(df["b"] + 1)
df.drop(columns=["a", "b"], inplace=True)
return df
def expected_df_3():
"""Expected output of test_expected_output_3."""
df = d.create_df_3()
df["a_new_col"] = np.log(df["a"])
df["b_new_col"] = np.log(df["b"])
return df
def expected_df_4():
"""Expected output of test_expected_output_4."""
df = d.create_df_3()
df["a_new_col"] = np.log(df["a"] + 1)
df["b_new_col"] = np.log(df["b"] + 1)
return df
def expected_df_5():
"""Expected output of test_expected_output_5."""
df = d.create_df_4()
df["a_new_col"] = np.log(df["a"] + 1) / np.log(5)
return df
def expected_df_6():
"""Expected output of test_expected_output_6."""
df = d.create_df_4()
df["a_new_col"] = np.log(df["a"]) / np.log(7)
df.drop("a", axis=1, inplace=True)
return df
def test_arguments(self):
"""Test that transform has expected arguments."""
h.test_function_arguments(
func=LogTransformer.transform, expected_arguments=["self", "X"]
)
def test_super_transform_called(self, mocker):
"""Test that BaseTransformer.transform called."""
df = d.create_df_3()
x = LogTransformer(columns=["a", "b"])
expected_call_args = {0: {"args": (d.create_df_3(),), "kwargs": {}}}
with h.assert_function_call(
mocker,
tubular.base.BaseTransformer,
"transform",
expected_call_args,
return_value=d.create_df_3(),
):
x.transform(df)
def test_error_with_non_numeric_columns(self):
"""Test an exception is raised if transform is applied to non-numeric columns."""
df = d.create_df_5()
x = LogTransformer(columns=["a", "b", "c"])
with pytest.raises(
TypeError, match=r"The following columns are not numeric in X; \['b', 'c'\]"
):
x.transform(df)
@pytest.mark.parametrize(
"df, expected",
h.row_by_row_params(d.create_df_3(), expected_df_1())
+ h.index_preserved_params(d.create_df_3(), expected_df_1()),
)
def test_expected_output_1(self, df, expected):
"""Test that transform is giving the expected output when not adding one and dropping original columns."""
x1 = LogTransformer(
columns=["a", "b"], add_1=False, drop=True, suffix="new_col"
)
df_transformed = x1.transform(df)
h.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="LogTransformer transform not adding 1 and dropping original columns",
)
@pytest.mark.parametrize(
"df, expected",
h.row_by_row_params(d.create_df_3(), expected_df_2())
+ h.index_preserved_params(d.create_df_3(), expected_df_2()),
)
def test_expected_output_2(self, df, expected):
"""Test that transform is giving the expected output when adding one and dropping original columns."""
x1 = LogTransformer(columns=["a", "b"], add_1=True, drop=True, suffix="new_col")
df_transformed = x1.transform(df)
h.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="LogTransformer transform adding 1 and dropping original columns",
)
@pytest.mark.parametrize(
"df, expected",
h.row_by_row_params(d.create_df_3(), expected_df_3())
+ h.index_preserved_params(d.create_df_3(), expected_df_3()),
)
def test_expected_output_3(self, df, expected):
"""Test that transform is giving the expected output when not adding one and not dropping original columns."""
x1 = LogTransformer(
columns=["a", "b"], add_1=False, drop=False, suffix="new_col"
)
df_transformed = x1.transform(df)
h.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="LogTransformer transform not adding 1 and dropping original columns",
)
@pytest.mark.parametrize(
"df, expected",
h.row_by_row_params(d.create_df_3(), expected_df_4())
+ h.index_preserved_params(d.create_df_3(), expected_df_4()),
)
def test_expected_output_4(self, df, expected):
"""Test that transform is giving the expected output when adding one and not dropping original columns."""
x1 = LogTransformer(
columns=["a", "b"], add_1=True, drop=False, suffix="new_col"
)
df_transformed = x1.transform(df)
h.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="LogTransformer transform not adding 1 and dropping original columns",
)
@pytest.mark.parametrize(
"df, expected",
h.row_by_row_params(d.create_df_4(), expected_df_5())
+ h.index_preserved_params(d.create_df_4(), expected_df_5()),
)
def test_expected_output_5(self, df, expected):
"""Test that transform is giving the expected output when adding one and not dropping
original columns and using base."""
x1 = LogTransformer(
columns=["a"], base=5, add_1=True, drop=False, suffix="new_col"
)
df_transformed = x1.transform(df)
h.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="LogTransformer transform not adding 1 and dropping original columns",
)
@pytest.mark.parametrize(
"df, expected",
h.row_by_row_params(d.create_df_4(), expected_df_6())
+ h.index_preserved_params(d.create_df_4(), expected_df_6()),
)
def test_expected_output_6(self, df, expected):
"""Test that transform is giving the expected output when not adding one and dropping
original columns and using base."""
x1 = LogTransformer(
columns=["a"], base=7, add_1=False, drop=True, suffix="new_col"
)
df_transformed = x1.transform(df)
h.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="LogTransformer transform should be using base, not adding 1, and not dropping original columns",
)
@pytest.mark.parametrize(
"df, columns, add_1, extra_exception_text",
(
[pd.DataFrame({"a": [1, 2, 0]}), ["a"], False, ""],
[pd.DataFrame({"a": [1, 2, 0], "b": [1, 2, 3]}), ["a", "b"], False, ""],
[pd.DataFrame({"a": [1, 2, -1]}), ["a"], True, r" \(after adding 1\)"],
[
pd.DataFrame({"a": [1, 2, -1], "b": [1, 2, 3]}),
["a", "b"],
True,
r" \(after adding 1\)",
],
[pd.DataFrame({"b": [1, 2, -0.001]}), ["b"], False, ""],
[
| pd.DataFrame({"b": [1, 2, -0.001], "a": [1, 2, 3]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
            pytest.skip("testing for legacy pickles not "
                        "supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
        tm.assert_numpy_array_equal(result, expected)
# ============================================================================
# getting fundamental data from financialmodelingprep.com
# Author - <NAME>
# Please report bugs/issues in the Q&A section
# =============================================================================
import requests
import pandas as pd
link = "https://financialmodelingprep.com/api/v3"
tickers = ["AXP"]
# list of tickers whose financial data needs to be extracted
financial_dir = {}
for ticker in tickers:
try:
#getting balance sheet data
temp_dir = {}
url = link+"/financials/balance-sheet-statement/"+ticker
page = requests.get(url)
fin_dir = page.json()
for key,value in fin_dir["financials"][0].items():
temp_dir[key] = value
#getting income statement data
url = link+"/financials/income-statement/"+ticker
page = requests.get(url)
fin_dir = page.json()
for key,value in fin_dir["financials"][0].items():
if key not in temp_dir.keys():
temp_dir[key] = value
#getting cashflow statement data
url = link+"/financials/cash-flow-statement/"+ticker
page = requests.get(url)
fin_dir = page.json()
for key,value in fin_dir["financials"][0].items():
if key not in temp_dir.keys():
temp_dir[key] = value
#getting EV data
url = link+"/enterprise-value/"+ticker
page = requests.get(url)
fin_dir = page.json()
for key,value in fin_dir["enterpriseValues"][0].items():
if key not in temp_dir.keys():
temp_dir[key] = value
#getting key statistic data
url = link+"/company-key-metrics/"+ticker
page = requests.get(url)
fin_dir = page.json()
for key,value in fin_dir["metrics"][0].items():
if key not in temp_dir.keys():
temp_dir[key] = value
#combining all extracted information with the corresponding ticker
financial_dir[ticker] = temp_dir
    except Exception:
        print("Problem scraping data for ", ticker)
#storing information in pandas dataframe
combined_financials = pd.DataFrame(financial_dir)
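# --- Illustrative post-processing sketch (an assumption, not part of the original
# script): `combined_financials` has one column per ticker and one row per reported
# metric, with values still stored as scraped strings. The two lines below coerce
# the values to numbers where possible and write the table to a hypothetical CSV
# file for later analysis.
combined_numeric = combined_financials.apply(pd.to_numeric, errors="coerce")
combined_numeric.to_csv("fundamental_data.csv")  # hypothetical output file name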
import pandas as pd
def format_date_hours(df: pd.DataFrame) -> pd.DataFrame:
"""
Format time and remove data points outside the 6-hourly scheme.
Parameters
----------
df : pd.DataFrame
The tracks DataFrame.
    Returns
-------
df_temp : pd.DataFrame
A copy of df with a proper datetime column, in format %Y-%m-%d, with hours 06:00, 12:00, 18:00, 00:00 only.
"""
df_temp = df.copy()
df_temp['Date'] = df_temp['Date'].astype(str)
df_temp['Date'] = df_temp['Date'].map(lambda x: x[:4] + '-' + x[4:6] + '-' + x[6:])
df_temp['Hour'] = df_temp['Hour'].map(lambda x: x[1:3] + ':' + x[3:])
df_temp['Time'] = df_temp['Hour'] + ' ' + df_temp['Date']
df_temp['Time'] = df_temp['Time'].map(lambda x: pd.to_datetime(x))
# Extraction of the 6-hourly scheme
hours = ['06:00', '12:00', '18:00', '00:00']
filt = df_temp.Hour.map(lambda x: x in hours)
df_temp = df_temp.loc[filt]
df_temp.reset_index(drop=True, inplace=True)
# Reality Check
if set(df_temp.Hour.unique()) != set(hours):
raise ValueError('The extraction of the 6-hourly scheme failed.')
df_temp.drop(columns=['Date', 'Hour'], inplace=True)
return df_temp
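# Illustrative helper (assumed example, not part of the original module): builds a
# toy tracks frame and runs format_date_hours on it. The 'Hour' strings follow the
# layout the slicing above expects (a leading character followed by HHMM), and all
# four synoptic hours appear in the kept rows so the built-in reality check passes.
def _example_format_date_hours() -> pd.DataFrame:
    toy = pd.DataFrame({
        'Date': ['20050828'] * 5,
        'Hour': [' 0000', ' 0600', ' 0730', ' 1200', ' 1800'],
    })
    # The 07:30 record is dropped; the remaining rows gain a datetime 'Time' column.
    return format_date_hours(toy)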
def fill_radii(df: pd.DataFrame) -> pd.DataFrame:
"""
Remove some missing values from radii columns of `df`.
By definition a low (resp. med, high) `wind radius` is the maximal distance
from the center of the hurricane where winds stronger than 34 (resp. 50, 64) knots
were observed.
We use this definition to fill some missing values. E.g. if the `Max_Speed`is lower
than 34 knots, all radii have to be 0.
Parameters
----------
df : pd.DataFrame
The tracks DataFrame.
    Returns
-------
df_temp : pd.DataFrame
A copy of df with partially completed missing values for the radii columns.
"""
df_temp = df.copy()
# We extract the names of the columns corresponding to radii
rad_cols = [col for col in df_temp.columns if 'Rad' in col]
speeds = [0, 34, 50, 64]
# For each of the three speeds relevant to wind radii, we create a dictionary setting some radii to 0.
# For speeds lower than 34, all radii are set to 0.
dict_low = {col: 0.0 for col in rad_cols}
# For speeds lower than 50, greater than 34, medium and high radii are set to 0.
dict_med = {col: 0.0 for col in rad_cols[4:]}
# For speeds lower than 64, greater than 50, high radii are set to 0.
dict_high = {col: 0.0 for col in rad_cols[8:]}
dicts = [dict_low, dict_med, dict_high]
for i in range(len(speeds) - 1):
filt = (df_temp.Max_Speed > speeds[i]) & (df_temp.Max_Speed <= speeds[i + 1])
df_temp.loc[filt] = df_temp.loc[filt].fillna(dicts[i])
return df_temp
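# Illustrative helper (assumed example, not part of the original module): shows how
# fill_radii zeroes radii that are impossible given Max_Speed. The column names are
# invented; only the ordering convention matters (the first four 'Rad' columns are
# the 34 kt radii, the next four the 50 kt radii, the last four the 64 kt radii).
def _example_fill_radii() -> pd.DataFrame:
    quadrants = ['NE', 'SE', 'SW', 'NW']
    cols = ['Rad{}_{}'.format(kt, q) for kt in (34, 50, 64) for q in quadrants]
    toy = pd.DataFrame(float('nan'), index=range(2), columns=cols)
    toy['Max_Speed'] = [30, 45]
    # Row 0 (30 kt <= 34): every radius becomes 0.
    # Row 1 (34 < 45 kt <= 50): only the 50 kt and 64 kt radii become 0.
    return fill_radii(toy)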
def format_lon_lat(df: pd.DataFrame) -> pd.DataFrame:
"""
Format longitude and latitude values from strings to integers.
Longitudes as strings are given in a format `mI` where m is a number and I
is either `W` (west) or `E` (east) with respect to the Greenwich meridian.
West coordinates are meant to be negative and east are positive.
Latitudes as strings are given in a format `mI` where m is a number and I
is either `N` (north) or `S` (south) with respect to the equator.
South coordinates are meant to be negative and north are positive.
Parameters
----------
df : pd.DataFrame
The tracks DataFrame.
    Returns
-------
df_temp : pd.DataFrame
A copy of df with proper longitudes and latitudes.
"""
df_temp = df.copy()
df_temp.Longitude = df_temp.Longitude.map(lambda x: -1*float(x[:-1]) if x[-1] == 'W' else float(x[:-1]))
df_temp.Latitude = df_temp.Latitude.map(lambda x: -1*float(x[:-1]) if x[-1] == 'S' else float(x[:-1]))
return df_temp
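# Illustrative helper (assumed example, not part of the original module): a tiny
# demonstration of the sign convention applied by format_lon_lat.
def _example_format_lon_lat() -> pd.DataFrame:
    toy = pd.DataFrame({'Longitude': ['75.0W', '10.0E'],
                        'Latitude': ['20.5N', '5.0S']})
    # Expected output: Longitude -> [-75.0, 10.0], Latitude -> [20.5, -5.0]
    return format_lon_lat(toy)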
def cleaning_pipeline(files_dir: str, track_name: str = 'df_tracks.csv',
new_name: str = 'df_tracks_after_1970', year: int = 1970):
"""
Cleans the data from df_tracks and saves the data with year >= `year`into a separate DataFrame.
Parameters
----------
files_dir: str
Path to the directory which contains the df_tracks DataFrame.
track_name: str
Name of the file containing df_tracks.
new_name: str
Name to use for saving the filtered data.
year: int
        The year to use as a lower bound (inclusive).
    Returns
    -------
    """
tracks_path = files_dir + track_name
# Necessary to set the dtype of Hour column, otherwise pandas infers int which yields errors
    df_tracks = pd.read_csv(tracks_path, header=0, index_col=0, dtype={'Hour': str})
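    # The rest of the original pipeline is not included in this excerpt. The lines
    # below are only an assumed sketch of how it could continue, based on the
    # docstring and the helpers defined above: format times and coordinates, fill
    # the radii, keep rows from `year` onwards, and save them under `new_name`.
    df_clean = format_lon_lat(fill_radii(format_date_hours(df_tracks)))
    df_recent = df_clean.loc[df_clean['Time'].dt.year >= year]
    df_recent.to_csv(files_dir + new_name + '.csv')  # assumed output location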
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
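        # (NaT.value is the int64 sentinel -9223372036854775808, i.e. -2**63, used here as a plain integer.)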
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
# previous behavior incorrect retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
        tm.assert_frame_equal(result, expected)
from statistics import *
from pandas.core.algorithms import factorize
from scipy.stats import gmean
from sklearn import preprocessing
from sklearn.neighbors import LocalOutlierFactor
from ycimpute.imputer import knnimput, EM
import pandas as pd
import numpy as np
import researchpy as rp
pd.options.mode.chained_assignment = None
class DataScienceKit():
def __init__(self) -> None:
pass
class MeasurementUnits():
def __init__(self):
self.data_operations = DataScienceKit.DataOperations()
self.exploratory_data_analysis = DataScienceKit.ExploratoryDataAnalysis()
# Arithmetic Mean
# numeric_col: Numeric DataFrame Column
def arithmetic_mean(self, dataset, numerical_column_name):
if(self.exploratory_data_analysis.is_numerical(dataset, numerical_column_name)):
if(self.data_operations.there_any_NaN_values_column(dataset, numerical_column_name) != True):
return mean(dataset[numerical_column_name])
else:
return ValueError(f'NaN Value Error: There is missing (NaN) data in {numerical_column_name} ')
else:
return ValueError(f'Variable Type Error: {numerical_column_name} is not a Numerical Variable ')
# Geometric Mean
# numerical_col: Numeric DataFrame Column
def geometric_mean(self, dataset, numerical_column_name):
if(self.exploratory_data_analysis.is_numerical(dataset, numerical_column_name)):
if(self.data_operations.there_any_NaN_values_column(dataset, numerical_column_name) != True):
return gmean(dataset[numerical_column_name])
else:
return ValueError(f'NaN Value Error: There is missing (NaN) data in {numerical_column_name} ')
else:
return ValueError(f'Variable Type Error: {numerical_column_name} is not a Numerical Variable ')
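        # For reference (hypothetical data): gmean([1, 3, 9]) is approximately 3.0, the cube root of 1*3*9 = 27.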
# Harmonic Mean
# numeric_col: Numeric DataFrame Column
def harmonic_mean(self, dataset, numerical_column_name):
if(self.exploratory_data_analysis.is_numerical(dataset, numerical_column_name)):
if(self.data_operations.there_any_NaN_values_column(dataset, numerical_column_name) != True):
return harmonic_mean(dataset[numerical_column_name])
else:
return ValueError(f'NaN Value Error: There is missing (NaN) data in {numerical_column_name} ')
else:
return ValueError(f'Variable Type Error: {numerical_column_name} is not a Numerical Variable ')
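        # For reference (hypothetical data): harmonic_mean([40, 60]) returns 48.0, i.e. 2 / (1/40 + 1/60).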
# Median
# numeric_col: Numeric DataFrame Column
# median_type: 1: Median, 2: Median Low, 3: Median High, 4: Median Grouped
def median(self, dataset, numerical_column_name, median_type=1, interpolation=1):
if(median_type == 1):
return median(dataset[numerical_column_name])
elif(median_type == 2):
return median_low(dataset[numerical_column_name])
elif(median_type == 3):
return median_high(dataset[numerical_column_name])
elif(median_type == 4):
if(self.exploratory_data_analysis.is_numerical(dataset, numerical_column_name)):
if(self.data_operations.there_any_NaN_values_column(dataset, numerical_column_name) != True):
return median_grouped(dataset[numerical_column_name], interval=interpolation)
else:
return ValueError(f'NaN Value Error: There is missing (NaN) data in {numerical_column_name} ')
else:
return ValueError(f'Variable Type Error: {numerical_column_name} is not a Numerical Variable')
else:
return ValueError("Invalid Median Type: Takes a value between 1 and 4")
# Mode
# numeric_col: Numeric DataFrame Column
# mode_type: 1: Mode, 2: Count Mode Value
def mode(self, dataset, numerical_column_name, mode_type=1):
if(mode_type == 1):
return mode(dataset[numerical_column_name])
elif(mode_type == 2):
                # Series.count() takes no value argument; compare against the mode and sum the matches instead.
                return int((dataset[numerical_column_name] == mode(dataset[numerical_column_name])).sum())
else:
return ValueError("Invalid Mode Type: Takes a value between 1 and 2")
# Variance
# numeric_col: Numeric DataFrame Column
# mean_data: If the mean value of the numeric column is not given as a parameter, None should remain
def variance(self, dataset, numerical_column_name):
if(self.exploratory_data_analysis.is_numerical(dataset, numerical_column_name)):
if(self.data_operations.there_any_NaN_values_column(dataset, numerical_column_name) != True):
return variance(dataset[numerical_column_name])
else:
return ValueError(f'NaN Value Error: There is missing (NaN) data in {numerical_column_name} ')
else:
return ValueError(f'Variable Type Error: {numerical_column_name} is not a Numerical Variable ')
# Standart Deviation
# numeric_col: Numeric DataFrame Column
def standart_deviation(self, dataset, numerical_column_name):
if(self.exploratory_data_analysis.is_numerical(dataset, numerical_column_name)):
if(self.data_operations.there_any_NaN_values_column(dataset, numerical_column_name) != True):
return stdev(dataset[numerical_column_name])
else:
return ValueError(f'NaN Value Error: There is missing (NaN) data in {numerical_column_name} ')
else:
return ValueError(f'Variable Type Error: {numerical_column_name} is not a Numerical Variable ')
# Minimum Value
# numeric_col: Numeric DataFrame Column
def minimum_value(self, dataset, numerical_column_name):
return min(dataset[numerical_column_name])
# Maximum Value
# numeric_col: Numeric DataFrame Column
def maximum_value(self, dataset, numerical_column_name):
return max(dataset[numerical_column_name])
# Kurtosis
# data_matrix: Numeric DataFrame Columns data_matrix = [dataset['number'], dataset['distance']]
def kurtosis(self, data_matrix):
df = pd.DataFrame(data=data_matrix)
return df.kurt(axis=1)
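        # Note: each Series in data_matrix becomes one row of df, so kurt(axis=1) returns one kurtosis value per input column.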
# Skew
# data_matrix: Numeric DataFrame Columns data_matrix = [dataset['number'], dataset['distance']]
def skewnewss(self, data_matrix):
            df = pd.DataFrame(data=data_matrix)
            return df.skew(axis=1)
# -*- coding: utf-8 -*-
#
#################
# This script takes as an input the data from Saunois et al. (2020),
# imported in csv format from the original Global_Methane_Budget_2000_2017_v2.0_full.xlsx
# file, and returns a csv file with CH4 flux data for 2017 and 2008-2017.
# Data is provided for both the Top-Down (TD) and Bottom-Up (BU) estimates.
#
# Last updated: Jan 2021
# Author: <NAME>
#
#################
import numpy as np
import pandas as pd
periods = ['2017', '2008_2017']
for period in periods:
######### Get Bottom Up global data #########
data_BU = pd.read_csv('../source/global_BU_'+period+'_clean.csv', header=0)
# Drop spurious row
data_BU = data_BU.drop([0], axis=0)
# Rename columns
data_BU['Sink/source type'] = data_BU['Emission totals']
data_BU.loc[data_BU['Sink/source type'] == 'Wetland','Sink/source type'] = 'Wetlands'
data_BU['Original data source'] = data_BU['Regions']
data_BU['Value (Tg CH4 yr-1)'] = round(data_BU['GLOBAL'], 0).astype(int)
#data_BU['Units'] = 'Tg CH4 yr-1'
data_BU['Estimate type'] = 'Bottom-up'
data_BU = data_BU[['Sink/source type', 'Estimate type','Original data source', 'Value (Tg CH4 yr-1)']].copy()
# Report maximum and minimum values
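    # For each flux category below, the row holding the ensemble maximum (and minimum) value is duplicated,
    # appended, and its 'Original data source' relabelled as 'Ensemble max' / 'Ensemble min'.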
for flux_type in data_BU['Sink/source type'].unique():
# Get flux type
data_BU_chunk = data_BU[ data_BU['Sink/source type'] == flux_type].copy()
# Get max
data_BU = data_BU.append(
data_BU_chunk[
data_BU_chunk['Value (Tg CH4 yr-1)'] ==
data_BU_chunk['Value (Tg CH4 yr-1)'].max()].drop_duplicates(
subset=['Value (Tg CH4 yr-1)']), ignore_index=True
)
data_BU.loc[len(data_BU.index)-1, 'Original data source'] = 'Ensemble max'
# Get min
data_BU = data_BU.append(
data_BU_chunk[
data_BU_chunk['Value (Tg CH4 yr-1)'] ==
data_BU_chunk['Value (Tg CH4 yr-1)'].min()].drop_duplicates(
subset=['Value (Tg CH4 yr-1)']), ignore_index=True
)
data_BU.loc[len(data_BU.index)-1, 'Original data source'] = 'Ensemble min'
######### Get Top Down global data #########
data_TD = pd.read_csv('../source/global_TD_'+period+'_clean.csv', header=0)
    # Drop spurious row
data_TD = data_TD.drop([0], axis=0)
# Rename columns
data_TD['Sink/source type'] = data_TD['Emission totals']
data_TD['Original data source'] = data_TD['Regions']
data_TD['Value (Tg CH4 yr-1)'] = round(data_TD['GLOBAL'], 0).astype(int)
#data_TD['Units'] = 'Tg CH4 yr-1'
data_TD['Estimate type'] = 'Top-down'
data_TD = data_TD[['Sink/source type', 'Estimate type','Original data source', 'Value (Tg CH4 yr-1)']].copy()
# Report maximum and minimum values
for flux_type in data_TD['Sink/source type'].unique():
# Get flux type
data_TD_chunk = data_TD[ data_TD['Sink/source type'] == flux_type].copy()
# Get max
data_TD = data_TD.append(
data_TD_chunk[
data_TD_chunk['Value (Tg CH4 yr-1)'] ==
data_TD_chunk['Value (Tg CH4 yr-1)'].max()].drop_duplicates(
subset=['Value (Tg CH4 yr-1)']), ignore_index=True
)
data_TD.loc[len(data_TD.index)-1, 'Original data source'] = 'Ensemble max'
# Get min
data_TD = data_TD.append(
data_TD_chunk[
data_TD_chunk['Value (Tg CH4 yr-1)'] ==
data_TD_chunk['Value (Tg CH4 yr-1)'].min()].drop_duplicates(
subset=['Value (Tg CH4 yr-1)']), ignore_index=True
)
data_TD.loc[len(data_TD.index)-1, 'Original data source'] = 'Ensemble min'
    # Concatenate TD and BU data, order
    total = pd.concat([data_TD, data_BU], axis=0, ignore_index=True)
from keras.models import model_from_json
from os import path
import sys, json, time
# lstm autoencoder predict sequence
from pandas import DataFrame, Series
import numpy as np
from numpy import array
from sklearn.preprocessing import OneHotEncoder, LabelEncoder,minmax_scale
# Get Directory
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
# Read input data
print('Reading Input Data')
# Model Build output
model_path=path.join(BASE_DIR, 'data/model_build_outputs/model3.json')
with open(model_path, newline='') as in_file:
model_build_out = json.load(in_file)
# Prediction Routes (Model Apply input)
prediction_routes_path = path.join(BASE_DIR, 'data/model_apply_inputs/new_route_data.json')
with open(prediction_routes_path, newline='') as in_file:
prediction_routes = json.load(in_file)
X_allR,route_stopsapp,route_highscore,Y_allR,predseq,predseq2,outputallR = [],[],[],[],[],[],[]
####_______________________________________________
## specify number of routes
route_data_sample = {k: prediction_routes[k] for k, _ in zip(prediction_routes,range(100))}
route_data_subsample_HS = {k: prediction_routes[k] for k, _ in zip(route_data_sample,range(100))}
print(len(route_data_subsample_HS))
for krml,vrml in prediction_routes.items():
route_stopsml = prediction_routes[krml]['stops']
route_stopsapp.append(route_stopsml)
lenm = len(route_stopsml)
print(lenm)
max_len = 222
print(max_len)
for kr,vr in prediction_routes.items():
route_stops = prediction_routes[kr]['stops']
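    # Find the depot (the stop whose type is 'Station') and move it to the front of the stop list.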
dic = {d for d, vd in route_stops.items() if vd['type'] == 'Station'}
depot = list(dic)[0]
index = list(route_stops.keys()).index(depot)
stops_list = [{**value, **{'id':key}} for key, value in route_stops.items()]
stops_list.insert(0, stops_list.pop(index))
lbl_encode = LabelEncoder()
stops_list_id = lbl_encode.fit_transform([i['id'] for i in stops_list])
stops_list_zone = lbl_encode.fit_transform([i['zone_id'] for i in stops_list])
stops_list_lat = [i['lat'] for i in stops_list]
stops_list_lng = [i['lng']*-1 for i in stops_list]
stops_list_features = list(zip(stops_list_id,stops_list_lat,stops_list_lng))
stops_list_featuresnew = list(zip(stops_list_zone,stops_list_lat,stops_list_lng))
    df = DataFrame(stops_list_features)
#!/usr/bin/env python
import rospy
from std_msgs.msg import Empty
import os
import csv
import time
import pandas as pd
import matplotlib
matplotlib.use('Agg')
#import matplotlib.pyplot as plt
#import sys
class Plots:
def __init__(self, path, param):
self.path = path
self.param = param
rospy.Subscriber("/csv/end", Empty, self.Plot)
self.restart_pub = rospy.Publisher('/restart', Empty, queue_size=1)
rospy.spin()
def Plot(self, data):
odometry = pd.read_csv(os.path.join(self.path,'odometry.csv'))
odometry.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
odometry_gt = pd.read_csv(os.path.join(self.path,'odometry_gt.csv'))
odometry_gt.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
reference = pd.read_csv(os.path.join(self.path,'reference.csv'))
reference.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
referenceGamma = pd.read_csv(os.path.join(self.path,'referenceGamma.csv'))
referenceGamma.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
'''
odometry_df = odometry.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\phi$',r'$\theta$',r'$\psi$']]
odometry_gt_df = odometry_gt.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\phi$',r'$\theta$',r'$\psi$']]
reference_df = reference.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\phi$',r'$\theta$',r'$\psi$']]
referenceGamma_df = referenceGamma.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\phi$',r'$\theta$',r'$\psi$']]
'''
odometry_df = odometry.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
odometry_gt_df = odometry_gt.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
reference_df = reference.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
referenceGamma_df = referenceGamma.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
odometry_df.plot(x=0,grid=True,title='Odometry').get_figure().savefig(os.path.join(self.path,'odometry.png'))
odometry_gt_df.plot(x=0,grid=True,title='Odometry_GT').get_figure().savefig(os.path.join(self.path,'odometry_gt.png'))
reference_df.plot(x=0,grid=True,title='Reference').get_figure().savefig(os.path.join(self.path,'reference.png'))
referenceGamma_df.plot(x=0,grid=True,title='Reference_Gamma').get_figure().savefig(os.path.join(self.path,'referenceGamma.png'))
errors = {}
#cambios= {}
#for ax in [r'x',r'y',r'z',r'\phi',r'\theta',r'\psi']:
for ax in [r'x',r'y',r'z',r'\psi']:
ax_orig = r'$' + ax + r'$'
ax_odom = r'$' + ax + r'_{odom}$'
ax_gt = r'$' + ax + r'_{gt}$'
ax_ref = r'$' + ax + r'_{ref}$'
ax_gamma = r'$' + ax + r'_{gamma}$'
ax_err = r'$e_{' + ax + r'}$'
odometry_ = odometry_df.loc[:,['Tiempo',ax_orig]]
odometry_.rename(columns={ax_orig: ax_odom},inplace = True)
odometry_gt_ = odometry_gt_df.loc[:,['Tiempo',ax_orig]]
odometry_gt_.rename(columns={ax_orig: ax_gt},inplace = True)
reference_ = reference_df.loc[:,['Tiempo',ax_orig]]
reference_.rename(columns={ax_orig: ax_ref},inplace = True)
referenceGamma_ = referenceGamma_df.loc[:,['Tiempo',ax_orig]]
referenceGamma_.rename(columns={ax_orig: ax_gamma},inplace = True)
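            # Inner-join the odometry, ground-truth and reference signals on the shared 'Tiempo' column
            # so that all curves are aligned on a common time base.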
df = pd.merge(odometry_, odometry_gt_, on='Tiempo', how='inner')
            df = pd.merge(df, reference_, on='Tiempo', how='inner')
#Flask is a pythonic and minimalistic web framework, a perfect candidate for this task
from flask import Flask
from flask import request
from flask import jsonify
app = Flask(__name__)
#Due to running the front-end and back-end on two different adresses/ports, we need CORS
from flask_cors import CORS, cross_origin
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
#Pandas is a high-performance data manipulation tool
import json
import pandas as pd
users = json.load(open('data.json'))
users = pd.DataFrame(users)
#Cleaning the data is necessary for healthy performance
users = users.dropna(subset = ['id', 'firstName'])
users = users.fillna("(private)")
#Getting the column names, for later usage
cols = [col for col in users]
#Helper functions
def to_dict_array(data):
return [dict(row[1]) for row in data.iterrows()]
def get_user(id):
return to_dict_array(users[users["id"]==int(id)])[0]
def top(n):
return to_dict_array(users.head(n))
#Greeting message
@app.route("/")
def welcome():
return "<h3>Welcome to the Business Network Backend!</h3>"
#Allowing real-time search of users based on their names or gender
@app.route("/search")
@cross_origin()
def search():
query = request.args.get('query')
if query == None:
return jsonify(top(12))
elif query == "male":
return jsonify(to_dict_array(users[users["gender"]=="male"].head(15)))
elif query == "female":
return jsonify(to_dict_array(users[users["gender"]=="female"].head(15)))
else:
return jsonify(to_dict_array(users[(users["firstName"] + users["surname"]).str.lower().str.contains(query.lower())]))
#Returning the data about a singe user based on his ID
@app.route("/user")
@cross_origin()
def user():
id = request.args.get('id')
return jsonify(get_user(id))
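# Example requests (assuming the default Flask development server, e.g. http://localhost:5000):
#   GET /search?query=male  -> the first 15 users whose gender is "male"
#   GET /user?id=42         -> the user whose "id" field equals 42 (the id value here is hypothetical)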
#Searching user's first and second level connections
@app.route("/friends")
def friends():
id = request.args.get('id')
level = request.args.get('level')
user = get_user(id)
friends = users[users["id"].isin(user['friends'])]
return_data = ""
if(level == "1"):
return_data = to_dict_array(friends)
elif(level == "2"):
        f_two = pd.DataFrame()
"""unit test for loanpy.sanity.py (2.0 BETA) for pytest 7.1.1"""
from collections import OrderedDict
from datetime import datetime
from os import remove
from pathlib import Path
from time import struct_time
from unittest.mock import call, patch
from numpy import nan
from pandas import DataFrame, Series, read_csv
from pandas.testing import assert_frame_equal, assert_series_equal
from pytest import raises
from loanpy.sanity import (
ArgumentsAlreadyTested,
cache,
check_cache,
eval_adapt,
eval_recon,
eval_all,
eval_one,
get_crossval_data,
get_dist,
get_nse4df,
get_noncrossval_sc,
get_tpr_fpr_opt,
loop_thru_data,
make_stat,
plot_roc,
postprocess,
postprocess2,
phonotactics_predicted,
write_to_cache)
from loanpy import sanity
def test_cache():
"""Is cache read and written to correctly?"""
with patch("loanpy.sanity.check_cache") as check_cache_mock:
check_cache_mock.return_value = None
with patch("loanpy.sanity.write_to_cache") as write_to_cache_mock:
write_to_cache_mock.return_value = None
@cache
def mockfunc(*args, **kwargs): return 1, 2, 3, 4
# without kwarg
assert mockfunc(path2cache=4) is None
check_cache_mock.assert_called_with(4, {'path2cache': 4})
write_to_cache_mock.assert_called_with(
4, {'path2cache': 4}, 2, 3, 4)
# with kwarg
assert mockfunc(4, 5, 6, path2cache=7) is None
check_cache_mock.assert_called_with(7, {
'path2cache': 7, 'forms_csv': 4, 'tgt_lg': 5, 'src_lg': 6})
write_to_cache_mock.assert_called_with(7, {
'path2cache': 7, 'forms_csv': 4,
'tgt_lg': 5, 'src_lg': 6}, 2, 3, 4)
def test_eval_all():
"""Is the main function doing its job in evaluating etymological data?"""
class AdrcMonkey:
def __init__(self):
self.dfety = 12345
adrc_monkey = AdrcMonkey()
# patch all functions. wrapper counts as part of eval_all
with patch("loanpy.sanity.check_cache") as check_cache_mock:
check_cache_mock.return_value = "cc"
with patch("loanpy.sanity.write_to_cache") as write_to_cache_mock:
write_to_cache_mock.return_value = "w2c"
with patch("loanpy.sanity.time", side_effect=[5, 5]) as time_mock:
with patch("loanpy.sanity.Adrc") as adrc_mock:
adrc_mock.return_value = AdrcMonkey
with patch("loanpy.sanity.loop_thru_data"
) as loop_thru_data_mock:
loop_thru_data_mock.return_value = "ld"
with patch("loanpy.sanity.postprocess"
) as postprocess_mock:
postprocess_mock.return_value = adrc_monkey
with patch("loanpy.sanity.postprocess2"
) as postprocess2_mock:
postprocess2_mock.return_value = 987 # stat
assert eval_all(9, 9, 9) == (12345, 987, 5, 5)
# assert calls
check_cache_mock.assert_not_called()
write_to_cache_mock.assert_not_called()
time_mock.assert_has_calls([], [])
adrc_mock.assert_called_with(
forms_csv=9,
source_language=9,
target_language=9,
mode='adapt',
most_frequent_phonotactics=9999999,
phonotactic_inventory=None,
connector=None,
scdictbase=None,
vfb=None)
loop_thru_data_mock.assert_called_with(
AdrcMonkey, False, False, False, False, 1, 1, 100, 49, False, [
10, 50, 100, 500, 1000], 'adapt', False, True)
postprocess_mock.assert_called_with("ld")
postprocess2_mock.assert_called_with(
adrc_monkey,
[10, 50, 100, 500, 1000],
"adapt",
None)
del AdrcMonkey, adrc_monkey
def test_loop_thru_data():
"""Is cross-validation called and loop run?"""
# set up expected output
df_exp = DataFrame({
"guesses": [1], "best_guess": [2],
"workflow_step0": [3], "workflow_step1": [4]
})
dfforms_mock = DataFrame({
"Source_Form": ["apple", "banana", "cherry"],
"Target_Form": ["Apfel", "Banane", "Kirsche"]
})
out1_eval_one = {"best_guess": "abc", "guesses": 1}
out2_eval_one = {"best_guess": "def", "guesses": 2}
out3_eval_one = {"best_guess": "ghi", "guesses": 3}
class AdrcMonkey:
def __init__(self):
self.forms_target_language = ["Apfel", "Banane", "Kirsche"]
self.dfety = dfforms_mock
def tqdm_mock(iterable):
tqdm_mock.called_with = iterable
return iterable
tqdm_real, sanity.tqdm = sanity.tqdm, tqdm_mock
adrc_monkey = AdrcMonkey()
idxlist = iter([0, 1, 2])
forms = iter(["Apfel", "Banane", "Kirsche"])
# first patch is not called by default
with patch("loanpy.sanity.get_noncrossval_sc") as get_noncrossval_sc_mock:
with patch("loanpy.sanity.get_crossval_data",
side_effect=[adrc_monkey] * 3) as get_crossval_data_mock:
adrc_monkey.idx_of_popped_word = next(idxlist)
adrc_monkey.popped_word = next(forms)
with patch("loanpy.sanity.eval_one",
side_effect=[out1_eval_one,
out2_eval_one, out3_eval_one]
) as eval_one_mock:
with patch("loanpy.sanity.DataFrame") as DataFrame_mock:
DataFrame_mock.return_value = "dfoutmock"
with patch("loanpy.sanity.concat") as concat_mock:
concat_mock.return_value = df_exp
assert loop_thru_data(
adrc_monkey, 1, 1, 100, 49,
False, False, False, False, False,
[10, 50, 100, 500, 1000],
'adapt', False, True) == adrc_monkey
# assert dfety was plugged in with good col names
assert_frame_equal(adrc_monkey.dfety, df_exp)
# assert calls
# assert 1st patch not called
get_noncrossval_sc_mock.assert_not_called()
get_crossval_data_mock.assert_has_calls([
call(adrc_monkey, 0, False),
call(adrc_monkey, 1, False),
call(adrc_monkey, 2, False)
])
eval_one_mock.assert_has_calls([
call("Apfel", adrc_monkey, "apple", 1, 1, 100, 49, False,
False, False, False, False, [10, 50, 100, 500, 1000], 'adapt'),
call("Banane", adrc_monkey, "banana", 1, 1, 100, 49, False,
False, False, False, False, [10, 50, 100, 500, 1000], 'adapt'),
call("Kirsche", adrc_monkey, "cherry", 1, 1, 100, 49, False,
False, False, False, False, [10, 50, 100, 500, 1000], 'adapt')
])
DataFrame_mock.assert_called_with({'best_guess': ['abc', 'def', 'ghi'],
'guesses': [1, 2, 3]})
concat_mock.assert_called_with([dfforms_mock, "dfoutmock"], axis=1)
assert isinstance(tqdm_mock.called_with, enumerate)
# tear down
sanity.tqdm.called_with = None
# 2nd assertion: loop with crossval=True
# fresh instance (old got modified)
adrc_monkey = AdrcMonkey()
with patch("loanpy.sanity.get_noncrossval_sc") as get_noncrossval_sc_mock:
get_noncrossval_sc_mock.return_value = adrc_monkey
with patch("loanpy.sanity.get_crossval_data"
) as get_crossval_data_mock:
with patch("loanpy.sanity.eval_one",
side_effect=[out1_eval_one,
out2_eval_one, out3_eval_one]
) as eval_one_mock:
with patch("loanpy.sanity.DataFrame") as DataFrame_mock:
DataFrame_mock.return_value = "dfoutmock"
with patch("loanpy.sanity.concat") as concat_mock:
concat_mock.return_value = DataFrame(
[(1, 2, 3, 4)],
columns=['guesses', 'best_guess',
'workflow_step0', 'workflow_step1'])
assert loop_thru_data(
adrc_monkey, 1, 1, 100, 49,
False, False, False, False, False,
[10, 50, 100, 500, 1000],
'adapt', False, False) == adrc_monkey
print(adrc_monkey.dfety)
assert_frame_equal(adrc_monkey.dfety, df_exp)
# assert calls
# assert 1st patch not called
get_crossval_data_mock.assert_not_called()
get_noncrossval_sc_mock.assert_called_with(adrc_monkey, False)
eval_one_mock.assert_has_calls([
call("Apfel", adrc_monkey, "apple", 1, 1, 100, 49, False,
False, False, False, False, [10, 50, 100, 500, 1000], 'adapt'),
call("Banane", adrc_monkey, "banana", 1, 1, 100, 49, False,
False, False, False, False, [10, 50, 100, 500, 1000], 'adapt'),
call("Kirsche", adrc_monkey, "cherry", 1, 1, 100, 49, False,
False, False, False, False, [10, 50, 100, 500, 1000], 'adapt')
])
DataFrame_mock.assert_called_with({'best_guess': ['abc', 'def', 'ghi'],
'guesses': [1, 2, 3]})
concat_mock.assert_called_with([dfforms_mock, "dfoutmock"], axis=1)
assert isinstance(tqdm_mock.called_with, enumerate)
# tear down
sanity.tqdm = tqdm_real
del adrc_monkey, AdrcMonkey, tqdm_real, tqdm_mock, dfforms_mock, df_exp
def test_eval_one():
"""Are eval_adapt, eval_recon called and their results evaluated?"""
class AdrcMonkey:
pass
adrc_monkey = AdrcMonkey()
# test when target is hit in first round
with patch("loanpy.sanity.eval_adapt") as eval_adapt_mock:
with patch("loanpy.sanity.eval_recon") as eval_recon_mock:
eval_adapt_mock.return_value = {
"guesses": 123, "best_guess": "bla"}
assert eval_one("apple", "Apfel", adrc_monkey, False,
False, False, False, 1, 1, 100, 49, False,
[10, 50, 100, 500, 1000], 'adapt'
) == {"guesses": 123, "best_guess": "bla"}
# assert correct args were passed on to eval_adapt
eval_adapt_mock.assert_called_with(
"apple",
"Apfel",
adrc_monkey,
10,
False,
False,
False,
False,
1,
1,
100,
49,
False)
# assert eval_recon was not called
eval_recon_mock.assert_not_called()
# test when target is hit in 2nd round
with patch("loanpy.sanity.eval_adapt",
side_effect=[
{"guesses": float("inf"), "best_guess": "bli"},
{"guesses": 123, "best_guess": "bla"}]) as eval_adapt_mock:
with patch("loanpy.sanity.eval_recon") as eval_recon_mock:
assert eval_one("apple", "Apfel", adrc_monkey, False,
False, False, False, 1, 1, 100, 49, False,
[10, 50, 100, 500, 1000], 'adapt'
) == {"guesses": 123, "best_guess": "bla"}
# eval_adapt called twice
eval_adapt_mock.assert_has_calls([
call("apple", "Apfel", adrc_monkey,
10, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
50, False, False, False, False, 1, 1, 100, 49, False)])
# assert eval_recon was not called
eval_recon_mock.assert_not_called()
# test when target is hit in 3rd round
with patch("loanpy.sanity.eval_adapt",
side_effect=[
{"guesses": float("inf"), "best_guess": "bli"},
{"guesses": float("inf"), "best_guess": "bla"},
{"guesses": 123, "best_guess": "blu"}]
) as eval_adapt_mock:
assert eval_one("apple", "Apfel", adrc_monkey, False,
False, False, False, 1, 1, 100, 49, False,
[10, 50, 100, 500, 1000], 'adapt'
) == {"guesses": 123, "best_guess": "blu"}
# eval_adapt called 3 times
eval_adapt_mock.assert_has_calls([
call("apple", "Apfel", adrc_monkey,
10, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
50, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
100, False, False, False, False, 1, 1, 100, 49, False)])
# assert eval_recon was not called
eval_recon_mock.assert_not_called()
# test when target is not hit
with patch("loanpy.sanity.eval_adapt") as eval_adapt_mock:
eval_adapt_mock.return_value = {"guesses": float("inf"),
"best_guess": "bla"}
with patch("loanpy.sanity.eval_recon") as eval_recon_mock:
assert eval_one("apple", "Apfel", adrc_monkey, False,
False, False, False, 1, 1, 100, 49, False,
[10, 50, 100, 500, 1000], 'adapt'
) == {"guesses": float("inf"), "best_guess": "bla"}
# assert eval_adapt called as many times as guesslist is long
eval_adapt_mock.assert_has_calls([
call("apple", "Apfel", adrc_monkey,
10, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
50, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
100, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
500, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
1000, False, False, False, False, 1, 1, 100, 49, False)
])
# assert eval_recon was not called
eval_recon_mock.assert_not_called()
# test if reconstruct is called when mode=="reconstruct"
with patch("loanpy.sanity.eval_recon") as eval_recon_mock:
eval_recon_mock.return_value = {"guesses": float("inf"),
"best_guess": "bla"}
with patch("loanpy.sanity.eval_adapt") as eval_adapt_mock:
assert eval_one("apple", "Apfel", adrc_monkey, False,
False, False, False, 1, 1, 100, 49, False,
[10, 50, 100, 500, 1000], 'reconstruct'
) == {"guesses": float("inf"), "best_guess": "bla"}
# eval eval_recon called as many times as guesslist is long
eval_recon_mock.assert_has_calls([
call("apple", "Apfel", adrc_monkey,
10, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
50, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
100, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
500, False, False, False, False, 1, 1, 100, 49, False),
call("apple", "Apfel", adrc_monkey,
1000, False, False, False, False, 1, 1, 100, 49, False)
])
# assert eval_adapt not called
eval_adapt_mock.assert_not_called()
del adrc_monkey, AdrcMonkey
def test_eval_adapt():
"""Is result of loanpy.Adrc.adrc.adapt evaluated?"""
class AdrcMonkey:
def __init__(self, adapt_returns=None, adapt_raises=None):
self.workflow = {"step1": "ya", "step2": "ye", "step3": "yu"}
self.adapt_returns = adapt_returns
self.adapt_raises = adapt_raises
self.adapt_called_with = []
def adapt(self, *args): # the patch. This is being run.
self.adapt_called_with.append([self, *args])
if self.adapt_raises is not None:
raise self.adapt_raises()
return self.adapt_returns
adrc_monkey = AdrcMonkey(adapt_raises=KeyError)
sanity.Adrc, real_Adrc = AdrcMonkey, sanity.Adrc
# check if keyerror is handled correctly (no prediction possible)
with patch("loanpy.sanity.get_howmany") as get_howmany_mock:
get_howmany_mock.return_value = (1, 2, 3)
# assert with show_workflow=False
assert eval_adapt(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': 'KeyError',
'guesses': float("inf")}
get_howmany_mock.assert_called_with(10, 1, 1)
# assert call
assert adrc_monkey.adapt_called_with[0] == [
adrc_monkey, "apple",
1, 2, 3, 100, 49, False, False, False, False, False]
# assert with show_workflow=True
assert eval_adapt(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
True) == {
"best_guess": "KeyError",
"guesses": float("inf"),
"step1": "ya",
"step2": "ye",
"step3": "yu"}
get_howmany_mock.assert_called_with(10, 1, 1)
assert adrc_monkey.adapt_called_with[1] == [
adrc_monkey, "apple", 1, 2, 3, 100, 49,
False, False, False, False, True]
# check if valueerror is handled correctly (wrong predictions made)
adrc_monkey = AdrcMonkey(adapt_returns="yamdelavasa, 2nd_g, 3rd_g")
with patch("loanpy.sanity.get_howmany") as get_howmany_mock:
get_howmany_mock.return_value = (1, 2, 3)
assert eval_adapt("Apfel", adrc_monkey, "apple",
10, 1, 1, 100, 49,
False, False, False, False, False) == {
# "didn't hit target but this was best guess"
"guesses": float("inf"), "best_guess": "yamdelavasa"}
get_howmany_mock.assert_called_with(10, 1, 1)
assert adrc_monkey.adapt_called_with[0] == [
adrc_monkey, "apple", 1, 2, 3, 100, 49,
False, False, False, False, False]
# assert with show_workflow=True
assert eval_adapt(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
True) == {
"guesses": float("inf"),
"best_guess": "yamdelavasa",
"step1": "ya",
"step2": "ye",
"step3": "yu"}
get_howmany_mock.assert_called_with(10, 1, 1)
assert adrc_monkey.adapt_called_with[1] == [
adrc_monkey, "apple", 1, 2, 3, 100, 49,
False, False, False, False, True]
# -check if no error is handled correctly (right prediction made)
adrc_monkey = AdrcMonkey(adapt_returns="yamdelavasa, def, Apfel")
with patch("loanpy.sanity.get_howmany") as get_howmany_mock:
get_howmany_mock.return_value = (1, 2, 3)
assert eval_adapt(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': 'Apfel',
'guesses': 3}
# hit the target, so: best guess = target (even if not on index 0!)
get_howmany_mock.assert_called_with(10, 1, 1)
assert adrc_monkey.adapt_called_with[0] == [
adrc_monkey, "apple", 1, 2, 3, 100, 49,
False, False, False, False, False]
# assert with show_workflow=True
assert eval_adapt(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
True) == {
'best_guess': 'Apfel',
'guesses': 3,
'step1': 'ya',
'step2': 'ye',
'step3': 'yu'}
get_howmany_mock.assert_called_with(10, 1, 1)
assert adrc_monkey.adapt_called_with[1] == [
adrc_monkey, "apple", 1, 2, 3, 100, 49,
False, False, False, False, True]
# tear down
sanity.Adrc = real_Adrc
del AdrcMonkey, adrc_monkey, real_Adrc
def test_eval_recon():
"""Is result of loanpy.adrc.Adrc.reconstruct evaluated?"""
# set up
class AdrcMonkey:
def __init__(self, reconstruct_returns):
self.reconstruct_returns = reconstruct_returns
self.reconstruct_called_with = []
def reconstruct(self, *args): # the patch. This is being run.
self.reconstruct_called_with.append([self, *args])
return self.reconstruct_returns
# plug in class
sanity.Adrc, real_Adrc = AdrcMonkey, sanity.Adrc
# 1: target not hit, keyerror (else condition)
adrc_monkey = AdrcMonkey("#a, b c# not old")
assert eval_recon(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': '#a, b c# not old',
'guesses': float("inf")}
# assert call (source and target gets flipped!)
assert adrc_monkey.reconstruct_called_with[0] == [
adrc_monkey, "apple", 10, 1, 1, 100, 49,
False, False, False, False, False]
# target not hit, short regex (capital "A" missing)
adrc_monkey = AdrcMonkey("(a)(p)(p|f)(e)?(l)(e)?")
assert eval_recon(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': '(a)(p)(p|f)(e)?(l)(e)?',
'guesses': float("inf")}
# assert call
assert adrc_monkey.reconstruct_called_with[0] == [
adrc_monkey, "apple", 10, 1, 1, 100, 49,
False, False, False, False, False]
# target not hit, long regex (capital "A" missing)
adrc_monkey = AdrcMonkey("^Appele$|^Appel$")
assert eval_recon(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': 'Appele',
'guesses': float("inf")}
# assert call
assert adrc_monkey.reconstruct_called_with[0] == [
adrc_monkey, "apple", 10, 1, 1, 100, 49,
False, False, False, False, False]
# 2: target hit with short regex
adrc_monkey = AdrcMonkey("(a|A)(p)(p|f)(e)?(l)(e)?")
assert eval_recon(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': '(a|A)(p)(p|f)(e)?(l)(e)?',
'guesses': 10}
# 3: target hit with long regex
adrc_monkey = AdrcMonkey( # correct solution on index nr 5
"^Appele$|^Appel$|^Apple$|^Appl$|^Apfele$|^Apfel$|^Apfle$|^Apfl$")
assert eval_recon(
"Apfel",
adrc_monkey,
"apple",
10,
1,
1,
100,
49,
False,
False,
False,
False,
False) == {
'best_guess': 'Apfel',
'guesses': 6}
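    # With a long regex, 'guesses' is the 1-based position of the target among the
    # '|'-separated candidates (here 'Apfel' is the 6th of 8 alternatives).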
# tear down
sanity.Adrc = real_Adrc
del adrc_monkey, real_Adrc, AdrcMonkey
def test_get_noncrossval_sc():
"""Are non-crossvalidated sound correspondences extracted and assigned?"""
# set up
class AdrcMonkey:
def __init__(self): self.get_sound_corresp_called_with = []
def get_sound_corresp(self, *args):
self.get_sound_corresp_called_with.append([*args])
return [1, 2, 3, 4, 5, 6]
# test with writesc=False
adrc_monkey = AdrcMonkey()
get_noncrossval_sc(adrc_monkey, False)
assert adrc_monkey.__dict__ == {
'get_sound_corresp_called_with': [[False]],
'scdict': 1, 'scdict_phonotactics': 4, 'sedict': 2}
# test with writesc=Path
adrc_monkey, path = AdrcMonkey(), Path()
get_noncrossval_sc(adrc_monkey, path)
assert adrc_monkey.__dict__ == {
'get_sound_corresp_called_with': [[path]],
'scdict': 1, 'scdict_phonotactics': 4, 'sedict': 2}
del adrc_monkey, AdrcMonkey
def test_get_crossval_data():
"""check if correct row is dropped from df for cross-validation"""
# set up mock class for input and instantiate it
class AdrcMonkey:
def __init__(self):
self.forms_target_language = ["apple", "banana", "cherry"]
self.get_sound_corresp_called_with = []
self.dfety = DataFrame({"Target_Form":
["apple", "banana", "cherry"],
"color": ["green", "yellow", "red"]})
def get_sound_corresp(self, *args):
self.get_sound_corresp_called_with.append([*args])
return [{"d1": "scdict"}, {"d2": "sedict"}, {},
{"d3": "scdict_phonotactics"}, {}, {}]
def get_inventories(self, *args):
return ({"a", "p", "l", "e", "b", "n", "c", "h", "r", "y"},
{"a", "pp", "l", "e", "b", "n", "ch", "rr", "y"},
{"VCCVC", "CVCVCV", "CCVCCV"})
adrc_monkey = AdrcMonkey()
# set up actual output as variable
adrc_obj_out = get_crossval_data(adrc_monkey, 1, None)
# assert scdict and scdict_phonotactics were correctly plugged into
# adrc_class
assert adrc_obj_out.scdict == {"d1": "scdict"}
assert adrc_obj_out.sedict == {"d2": "sedict"}
assert adrc_obj_out.scdict_phonotactics == {"d3": "scdict_phonotactics"}
assert adrc_obj_out.forms_target_language == ["apple", "cherry"]
assert adrc_monkey.get_sound_corresp_called_with == [[None]]
# tear down
del AdrcMonkey, adrc_monkey, adrc_obj_out
def test_postprocess():
"""Is result of loanpy.sanity.loop_thru_data postprocessed correctly?"""
# patch functions
with patch("loanpy.sanity.get_nse4df",
side_effect=["out1_getnse4df",
"out2_getnse4df"]) as get_nse4df_mock:
        with patch("loanpy.sanity.phonotactics_predicted"
                   ) as phonotactics_predicted_mock:
phonotactics_predicted_mock.return_value = "out_phonotacticspred"
with patch("loanpy.sanity.get_dist") as get_dist_mock:
get_dist_mock.return_value = "out_getldnld"
# assert return value
assert postprocess("in1") == "out_getldnld"
# assert calls
get_dist_mock.assert_called_with("out_phonotacticspred", "best_guess")
phonotactics_predicted_mock.assert_called_with("out2_getnse4df")
get_nse4df_mock.assert_has_calls(
[call('in1', 'Target_Form'), call('out1_getnse4df', 'best_guess')]
)
# test with show_workflow=False
# patch functions
with patch("loanpy.sanity.get_nse4df",
side_effect=["out1_getnse4df",
"out2_getnse4df"]) as get_nse4df_mock:
with patch("loanpy.\
sanity.phonotactics_predicted") as phonotactics_predicted_mock:
phonotactics_predicted_mock.return_value = "out_phonotacticspred"
with patch("loanpy.sanity.get_dist") as get_dist_mock:
get_dist_mock.return_value = "out_getldnld"
# assert return value
assert postprocess("in1") == "out_getldnld"
# assert calls
get_dist_mock.assert_called_with("out_phonotacticspred", "best_guess")
phonotactics_predicted_mock.assert_called_with(
"out2_getnse4df") # only called if show_workflow is True
get_nse4df_mock.assert_has_calls(
[call('in1', 'Target_Form'), call('out1_getnse4df', 'best_guess')]
)
def test_postprocess2():
"""Is result of loanpy.sanity.postprocess postprocessed correctly?"""
# set up
df_exp = DataFrame({"guesses": [1, 2, 3]})
class AdrcMonkey:
def __init__(self): self.dfety = df_exp
adrc_monkey = AdrcMonkey()
# patch functions
with patch("loanpy.sanity.get_tpr_fpr_opt") as get_tpr_fpr_opt_mock:
get_tpr_fpr_opt_mock.return_value = ("x", "y", (7, 8, 9))
with patch("loanpy.sanity.make_stat") as make_stat_mock:
make_stat_mock.return_value = "out_stat"
assert postprocess2(adrc_monkey, [4, 5, 6], None) == "out_stat"
# assert calls
assert_series_equal(get_tpr_fpr_opt_mock.call_args_list[0][0][0],
Series([1, 2, 3], name="guesses"))
assert get_tpr_fpr_opt_mock.call_args_list[0][0][1] == [4, 5, 6]
assert get_tpr_fpr_opt_mock.call_args_list[0][0][2] == 3
make_stat_mock.assert_called_with(9, 8, 6, 3)
# test with write=Path()
path2out = Path(__file__).parent / "postprocess2.csv"
# patch functions
with patch("loanpy.sanity.get_tpr_fpr_opt") as get_tpr_fpr_opt_mock:
get_tpr_fpr_opt_mock.return_value = ("x", "y", (7, 8, 9))
with patch("loanpy.sanity.make_stat") as make_stat_mock:
make_stat_mock.return_value = "out_stat"
with patch("loanpy.sanity.plot_roc") as plot_roc_mock:
plot_roc_mock.return_value = None
assert postprocess2(
adrc_monkey,
[4, 5, 6],
"adapt.csv",
path2out) == "out_stat"
# assert output was written
    assert_frame_equal(read_csv(path2out),
# -*- coding: utf-8 -*-
# -*- python 3 -*-
# -*- <NAME> -*-
# Import packages
import re
import numpy as np
import pandas as pd
import os ##for directory
import sys
import pprint
'''general function for easy use of python'''
def splitAndCombine(gene, rxn, sep0, moveDuplicate=False):
## one rxn has several genes, this function was used to splite the genes
## used for the dataframe data
gene = gene.fillna('NA') # fill the NaN with 'NA'
gene0 = gene.tolist()
rxn0 = rxn.tolist()
s1 = list()
s2 = list()
for i in range(len(gene0)):
s1 = s1 + [rxn0[i]] * len(gene0[i].split(sep0))
s2 = s2 + gene0[i].split(sep0)
df0 = pd.DataFrame({'V1': s1,
'V2': s2}
)
if moveDuplicate == True:
df00 = df0.drop_duplicates()
else:
df00 = df0
return df00
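'''
# example usage (illustrative sketch; the gene/rxn values below are made up):
gene = pd.Series(['g1;g2', 'g3', None])
rxn = pd.Series(['r1', 'r2', 'r3'])
splitAndCombine(gene, rxn, sep0=';', moveDuplicate=True)
# returns a two-column dataframe: V1 holds the rxn id repeated once per gene,
# V2 holds the individual genes ('NA' is used where the gene entry was missing)
'''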
def getSimilarTarget(rxn_yeast0,rxn_newGPR0,ss):
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
rxn_yeast1 = np.array(rxn_yeast0) # np.ndarray()
rxn_yeast2 = rxn_yeast1.tolist()
rxn_yeast3 = pd.Series((v[0] for v in rxn_yeast2))
rxn_newGPR1 = np.array(rxn_newGPR0) # np.ndarray()
rxn_newGPR2 = rxn_newGPR1.tolist()
rxn_newGPR3 = pd.Series((v[0] for v in rxn_newGPR2))
similarTarget = [None] * ss
for i in range(ss):
similarTarget[i] = process.extract(rxn_newGPR3[i], rxn_yeast3, limit=2)
return similarTarget
'''
#example
newMet = pd.read_excel('new metabolite for check.xlsx')
newMet0 = newMet[['name_unify']]
gemMet = pd.read_excel('unique metabolite in yeastGEM.xlsx')
gemMet0 = gemMet[['Description_simple']]
ss0 = len(newMet0)
similarTarget0 = getSimilarTarget(gemMet0,newMet0,ss=ss0)
'''
def singleMapping(description, item1, item2, dataframe=True):
    """get the single description from item1 for each element of item2 based on mapping"""
#description = w
#item1 = v
#item2 = testData
# used for the list data
if dataframe:
description = description.tolist()
item1 = item1.tolist()
item2 = item2.tolist()
else:
pass
index = [None]*len(item2)
result = [None]*len(item2)
tt = [None]*len(item2)
for i in range(len(item2)):
if item2[i] in item1:
index[i] = item1.index(item2[i])
result[i] = description[index[i]]
else:
index[i] = None
result[i] = None
return result
'''
w=['a','b','c']
v=[1,2,3]
s=[3,1,2,4]
singleMapping(w,v,s,dataframe=False)
'''
def multiMapping(description, item1, item2, dataframe=True, sep=";", removeDuplicates=True):
    """get multiple descriptions from item1 for each element of item2 based on mapping"""
#description = w
#item1 = v
#item2 = testData
#used for the list data
if dataframe:
description = description.tolist()
item1 = item1.tolist()
item2 = item2.tolist()
else:
pass
result = [None]*len(item2)
for i in range(len(item2)):
if item2[i] in item1:
index0 = [description[index] for index in range(len(item1)) if item1[index] == item2[i]]
if removeDuplicates:
index1 = pd.unique(index0).tolist()
else:
index1 = index0
result[i] = sep.join(str(e) for e in index1) #string cat
else:
result[i] = None
return result
'''
# example data to test all the above function
df1 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2}
)
df2 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'D' : np.random.randn(12)})
df2['C'] = singleMapping(df1['C'], df1['A'], df2['A'])
df2['C'] = multiMapping(df1['C'], df1['A'], df2['A'])
'''
def updateOneColumn(df1, df2, key0, value0):
"""
using dataframe df2 to update the df1
:param df1:
:param df2:
:param key0: the common column name, a string, used for the mapping
:param value0: the column in df2 used to update the df1
:return:
example
df10 = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': ['x', 'y', 'z']})
df20 = pd.DataFrame({'A':['c','b'],
'B': ['e', 'd']})
updateOneColumn(df10,df20,key0='A',value0='B')
"""
df10 = df1.copy()
df11 = df1.copy()
df10[value0] = multiMapping(df2[value0], df2[key0], df10[key0])
for i, x in df10.iterrows():
print(x[value0])
if x[value0] is None:
df11[value0][i] = df11[value0][i]
else:
df11[value0][i] = df10[value0][i]
return df11[value0]
def RemoveDuplicated(s1):
"""
example:
s1=['a // a', 'b // a', None, 'non']
"""
s2=list()
for x in s1:
print(x)
if x =='non':
s2.append('')
elif x is None:
s2.append('')
else:
if "//" in x:
s0= x.split(' // ')
s0 = [x.strip() for x in s0]
s01= list(set(s0))
if len(s01)==1:
s2.append(s01[0])
else:
s2.append(' // '.join(s01))
else:
s2.append(x)
return s2
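'''
# example usage (illustrative; matches the docstring input):
RemoveDuplicated(['a // a', 'b // a', None, 'non'])
# returns ['a', 'b // a', '', ''] (the order inside 'b // a' may vary because set() is used)
'''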
def nz(value):
'''
Convert None to string else return value.
'''
    if value is None:
return 'none'
return value
def AutoUpdate(description1, para1, description2, para2):
# using the description1 in para1 to update the description2 in para2
description1 = description1.tolist()
para1 = para1.tolist()
description2 = description2.tolist()
para2 = para2.tolist()
ss = [None]*len(para2)
for i in range(len(para2)):
if para2[i] in para1:
ss[i] = para1.index(para2[i])
else:
ss[i] = None
for i in range(len(para2)):
        if ss[i] is not None:
description2[i] = description1[ss[i]]
else:
description2[i] = description2[i]
return description2
'''
# example data to test the followed function
df1 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2}
)
df2 = df1.iloc[[1,2]]
df2['C'] = ['good','good']
df1['C'] = AutoUpdate(df2['C'],df2['A'],df1['C'],df1['A'])
'''
def calculateFrequency(list0, item0):
'''
    This function is used to count how often each item occurs in a list and turn the counts into a dataframe
    :param list0: e.g. ['a','b','a']
    :param item0: column name used for the item values in the returned dataframe
    :return: a dataframe with two columns (item0 and 'number')
'''
summary = pd.Series(list0).value_counts()
summary = summary.to_frame(name='number')
summary.index.name = item0
summary.reset_index(inplace=True)
return summary
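'''
# example usage (illustrative; values are made up):
calculateFrequency(['a', 'b', 'a'], 'letter')
# returns a dataframe with columns ['letter', 'number'] and rows ('a', 2), ('b', 1)
'''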
"""function for model part"""
from cobra.manipulation import remove_genes
def getStrainGEMrxn(s0, geneMatrix0, templateGEM, templateGene):
'''
This function is used to produce the strain specific model based on panYeast and gene existence matrix
from 1011 yeast strain genome sequence project
:param s0: strain name 'BFC'
:param geneMatrix0: dataframe contains the gene existence matrix for each strain. geneMatrix = pd.read_csv('../data/geneMatrix0 of 1011 yeast strains.txt', sep="\t")
:templateGEM:
:templateGene:
:return: the rxn list for each new reaction3
'''
s1 = ['geneID', s0]
geneList = geneMatrix0.loc[:, s1]
gene_exist = singleMapping(geneList.loc[:, s0].tolist(), geneList.loc[:, 'geneID'].tolist(), templateGene,
dataframe=False)
gene_exist = [0 if v is None else v for v in gene_exist]
gene_remove = [x for x, y in zip(templateGene, gene_exist) if y < 1]
newModel = templateGEM.copy()
# for i in range(len(gene_remove)):
# print(i)
# remove_genes(newModel, [gene_remove[i]], remove_reactions=True)
remove_genes(newModel, gene_remove, remove_reactions=True)
rxn = []
for x in newModel.reactions:
rxn.append(x.id)
return rxn
def getStrainGEM(s0, geneMatrix0, templateGEM, templateGene):
'''
This function is used to produce the strain specific model based on panYeast and gene existence matrix
from 1011 yeast strain genome sequence project
:param s0: strain name 'BFC'
:param geneMatrix0: dataframe contains the gene existence matrix for each strain. geneMatrix = pd.read_csv('../data/geneMatrix0 of 1011 yeast strains.txt', sep="\t")
:templateGEM:
:templateGene:
:return: the rxn list for each new reaction3
'''
s1 = ['geneID', s0]
geneList = geneMatrix0.loc[:, s1]
gene_exist = singleMapping(geneList.loc[:, s0].tolist(), geneList.loc[:, 'geneID'].tolist(), templateGene,
dataframe=False)
gene_exist = [0 if v is None else v for v in gene_exist]
gene_remove = [x for x, y in zip(templateGene, gene_exist) if y < 1]
newModel = templateGEM.copy()
# for i in range(len(gene_remove)):
# print(i)
# remove_genes(newModel, [gene_remove[i]], remove_reactions=True)
remove_genes(newModel, gene_remove, remove_reactions=True)
return newModel
def getRemoveGeneList(s0, geneMatrix0, templateGEM, templateGene):
'''
This function is used to produce the strain specific model based on panYeast and gene existence matrix
from 1011 yeast strain genome sequence project
:param s0: strain name 'BFC'
:param geneMatrix0: dataframe contains the gene existence matrix for each strain. geneMatrix = pd.read_csv('../data/geneMatrix0 of 1011 yeast strains.txt', sep="\t")
:templateGEM:
:templateGene:
:return: the gene list removed from each strain specific model
'''
s1 = ['geneID', s0]
geneList = geneMatrix0.loc[:, s1]
gene_exist = singleMapping(geneList.loc[:, s0].tolist(), geneList.loc[:, 'geneID'].tolist(), templateGene,
dataframe=False)
gene_exist = [0 if v is None else v for v in gene_exist]
gene_remove = [x for x, y in zip(templateGene, gene_exist) if y < 1]
newModel = templateGEM.copy()
# for i in range(len(gene_remove)):
# print(i)
# remove_genes(newModel, [gene_remove[i]], remove_reactions=True)
remove_genes(newModel, gene_remove, remove_reactions=True)
gene = []
for x in newModel.genes:
gene.append(x.id)
gene_remove_from_model = list(set(templateGene)-set(gene))
return gene_remove_from_model
def updateGPR(gpr0, nameMapping):
'''
    This function is used to update a GPR that only contains the 'or' relation. It replaces the old gene names with
    the new gene names. Note that it does not remove duplicated values.
:param: gpr0
:nameMapping: a dataframe contains the mapping relation between the old and new gene name, has two columns-'geneID', 'panID'
:return: gpr with the replaced new gene name
'''
#this function is mainly used to update the gene relation with 'or'
s1 = gpr0
s2 = s1.split(' ')
s3 = singleMapping(nameMapping['panID'].tolist(),nameMapping['geneID'].tolist(),s2, dataframe=False)
for i, x in enumerate(s3):
if x is None:
s3[i]=s2[i]
else:
s3[i] = s3[i]
s4 = ' '.join(s3)
return s4
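'''
# example usage (illustrative sketch; the gene names below are made up):
nameMapping = pd.DataFrame({'geneID': ['gene1', 'gene2'], 'panID': ['pan1', 'pan2']})
updateGPR('gene1 or gene2 or gene3', nameMapping)
# returns 'pan1 or pan2 or gene3' (gene3 has no mapping, so it is kept unchanged)
'''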
def getCompartment(rxn):
"""
This function is used to obtain the compartment information from reaction of yeastGEM
:param rxn: example acetyl-CoA[m] + L-glutamate[m] -> coenzyme A[m] + H+[m] + N-acetyl-L-glutamate[m]'
:return:
"""
cp1 = ['[c]','[ce]','[e]','[er]','[erm]','[g]','[gm]','[lp]','[m]','[mm]','[n]','[p]','[v]','[vm]']
cp2 = ['cytoplasm','cell envelope','extracellular','endoplasmic reticulum','endoplasmic reticulum membrane','Golgi','Golgi membrane','lipid particle',
'mitochondrion','mitochondrial membrane','nucleus','peroxisome','vacuole','vacuolar membrane']
cp = [None]*len(cp1)
for i in range(len(cp1)):
if cp1[i] in rxn:
cp[i] = cp2[i]
else:
cp[i] = None
cp1 = [x for i,x in enumerate(cp) if x is not None]
cp0 = ';'.join(str(e) for e in cp1)
return cp0
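'''
# example usage (illustrative; the reaction string follows the docstring format):
getCompartment('acetyl-CoA[m] + L-glutamate[m] -> coenzyme A[m] + H+[m]')
# returns 'mitochondrion'
'''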
def getCommonCompartment(c1,c2, sep0=";"):
'''this function could get the common part between string c1 and c2
for example, c1="a;b", c2="a;c" '''
if c1 is None:
c10 = 'NONE'
else:
c10 = c1.split(sep0)
c10 = [x.strip() for x in c10]
if c2 is None:
c20 = 'NONE'
else:
c20 = c2.split(sep0)
c20 = [x.strip() for x in c20]
c3 = list(set(c10).intersection(c20))
c4 = sep0.join(str(e) for e in c3)
return c4
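'''
# example usage (illustrative; matches the docstring input):
getCommonCompartment('a;b', 'a;c')
# returns 'a'
'''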
def getRXNgeneMapping(rxn0, gpr0):
'''this function is used to split the GPR;
input, for example rxn0=['r1','g2']
gpr0=['a or c','a and b']
output, each rxn related with each gene'''
s1 = rxn0
s2 = gpr0
s2 = s2.str.replace('and','@')
s2 = s2.str.replace('or','@')
s2 = s2.str.replace('\\( ','')
s2 = s2.str.replace('\\(\\( ','')
s2 = s2.str.replace('\\(', '')
s2 = s2.str.replace('\\(\\(', '')
s2 = s2.str.replace(' \\)','')
s2 = s2.str.replace(' \\)\\) ','')
s2 = s2.str.replace('\\)', '')
s2 = s2.str.replace('\\)\\) ', '')
s3 = splitAndCombine(s2,s1,sep0="@")
s3['V2'] = s3['V2'].str.strip()
s3.columns = ['rxnID', 'gene']
return s3
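'''
# example usage (illustrative sketch; inputs must be pandas Series, values are made up):
rxn0 = pd.Series(['r1', 'r2'])
gpr0 = pd.Series(['a or c', 'a and b'])
getRXNgeneMapping(rxn0, gpr0)
# returns a dataframe with columns ['rxnID', 'gene']: (r1, a), (r1, c), (r2, a), (r2, b)
'''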
def getRXNmetaboliteMapping(rxn0, met0):
'''this function is used to split the equation of metabolites; used to produce the dataframe format of GEM using
cobrapy
    input, for example rxn0=['r1','r2']
           met0=['a => c','a => b']
    output, each rxn related with each metabolite'''
met_annotation = pd.read_excel('/Users/luho/PycharmProjects/model/cobrapy/result/met_yeastGEM.xlsx')
s1 = rxn0
s2 = met0
s3 = splitAndCombine(s2,s1,sep0=" ")
s3['V2'] = s3['V2'].str.strip()
s3.columns = ['rxnID', 'met']
s3['met_name'] = singleMapping(met_annotation['description'],met_annotation['m_name'],s3['met'])
for i, x in s3.iterrows():
if s3['met_name'][i] is None:
s3['met_name'][i] = s3['met'][i]
else:
s3['met_name'][i] = s3['met_name'][i]
return s3
def correctSomeWrongFormat(model0):
"""
This function is used to correct some wrong format when read yeastGEM model from cobratoolbox
"""
# Correct metabolite ids:
for met in model0.metabolites:
met.id = met.id.replace('__93__', '')
met._id = met._id.replace('__91__', '_')
print(met.id)
for reaction in model0.reactions:
reaction.gene_reaction_rule = reaction.gene_reaction_rule.replace('__45__', '-')
print(reaction.gene_reaction_rule)
for gene in model0.genes:
gene.id = gene.id.replace('__45__', '-')
return model0
def produceMetaboliteList(model0):
#produce the dataframe for the metabolites from yeastGEM
met_list = [None] * len(model0.metabolites)
met_dataframe = pd.DataFrame({'m_name': met_list,
'description': met_list,
'formula': met_list,
'charge': met_list,
'chebi': met_list,
'kegg': met_list,
'MNXID': met_list})
for i, met in enumerate(model0.metabolites):
print(i)
met_dataframe['m_name'][i] = met.id
met_dataframe['description'][i] = met.name
met_dataframe['formula'][i] = met.formula
met_dataframe['charge'][i] = met.charge
key = list(met.annotation.keys())
if 'chebi' in key:
met_dataframe['chebi'][i] = met.annotation['chebi']
else:
met_dataframe['chebi'][i] = None
if 'kegg.compound' in key:
met_dataframe['kegg'][i] = met.annotation['kegg.compound']
else:
met_dataframe['kegg'][i] = None
if 'metanetx.chemical' in key:
met_dataframe['MNXID'][i] = met.annotation['metanetx.chemical']
else:
met_dataframe['MNXID'][i] = None
#s2 = met_dataframe['m_name'].str.split('_', expand=True)
#met_dataframe['description'] = met_dataframe['description'].str.replace('\s\[', '@')
#s3 = met_dataframe['description'].str.split('@', expand=True)
#met_dataframe['description'] = s3.iloc[:, 0] + '[' + s2.iloc[:, 2] + ']'
return met_dataframe
def produceGeneList(model0):
#produce the gene list from GEM
genelist = []
for i in model0.genes:
print(i)
genelist.append(i.id)
return genelist
def produceRxnList(model0):
#produce the dataframe for the rxn from yeastGEM
reaction_list =[None]*len(model0.reactions)
gem_dataframe = pd.DataFrame({'name':reaction_list,
'equation':reaction_list,
'GPR':reaction_list,
'rxnID':reaction_list,
'formula':reaction_list
})
for i, reaction in enumerate(model0.reactions):
print(i)
gem_dataframe['name'][i] = reaction.name
gem_dataframe['equation'][i] = reaction.reaction
gem_dataframe['GPR'][i] = reaction.gene_reaction_rule
gem_dataframe['rxnID'][i] = reaction.id
gem_dataframe['ID'] = ['R'+ str(i) for i in range(0, len(model0.reactions))]
gem_dataframe['GPR'] = gem_dataframe['GPR'].str.replace('__45__', '-')
#replace the metabolite name in gem_dataframe
s0 = getRXNmetaboliteMapping(gem_dataframe['rxnID'], gem_dataframe['equation'])
gem_dataframe['formula'] = multiMapping(s0['met_name'],s0['rxnID'],gem_dataframe['rxnID'],removeDuplicates=False)
gem_dataframe['formula'] = gem_dataframe['formula'].str.replace(";", " ")
return gem_dataframe
def exchange(s1, subystem):
"""
this function is used to define the exchange reaction
s1=['a --> b','a <=> c', 'H+ [extracellular] + L-citrulline [extracellular] <=> H+ [cytoplasm] L-citrulline [cytoplasm]', ' a--> ']
subsystem = ['a','a','b','']
"""
for i, x in enumerate(s1):
print(i)
if ' --> ' in x:
x0 = x.split(' --> ')
if len(x0[1]) >=1:
#subystem.append('General') # exchange
subystem[i] = subystem[i]
else:
subystem[i] ='Exchange reaction' #exchange
print(subystem[i])
if ' <=> ' in x:
x0 = x.split(' <=> ')
if len(x0[1]) >=1:
#subystem.append('General') # exchange
subystem[i] = subystem[i]
else:
subystem[i] ='Exchange reaction' #exchange
print(subystem[i])
else:
subystem[i] = subystem[i]
return subystem
def exchange_ecYeast(s1, subystem):
"""
this function is used to define the exchange reaction
s1=['a --> b','a <=> c', 'H+ [extracellular] + L-citrulline [extracellular] <=> H+ [cytoplasm] L-citrulline [cytoplasm]', ' a--> ']
subsystem = ['a','a','b','']
"""
for i, x in enumerate(s1):
print(i)
if ' --> ' in x:
x0 = x.split(' --> ')
if len(x0[1]) >=1 and len(x0[0]) >=1:
#subystem.append('General') # exchange
subystem[i] = subystem[i]
else:
subystem[i] ='Exchange reaction' #exchange
print(subystem[i])
if ' <=> ' in x:
x0 = x.split(' <=> ')
if len(x0[1]) >=1 and len(x0[0]) >=1:
#subystem.append('General') # exchange
subystem[i] = subystem[i]
else:
subystem[i] ='Exchange reaction' #exchange
print(subystem[i])
else:
subystem[i] = subystem[i]
return subystem
#SLIME rxn
def SLIME(rxnName, subsystem):
"""
if the rxnName contains the SLIME, classify the reaction into SLIME reaction
"""
for i,x in enumerate(rxnName):
if 'SLIME' in x:
subsystem[i] = 'SLIME reaction'
print(subsystem[i])
else:
subsystem[i] = subsystem[i]
return subsystem
def transport(s1, subsysem):
"""
this function is used to define the transport reaction
#example
s1 =['2-methylbutyl acetate [cytoplasm] --> 2-methylbutyl acetate [extracellular]', 'H+ [extracellular] + phosphoenolpyruvate [extracellular] <=> H+ [cytoplasm] + phosphoenolpyruvate [cytoplasm]']
subsysem = ['a','b']
:param s1:
:param subsysem:
:return:
"""
for i, x0 in enumerate(s1):
x1 = re.findall(r"\[([A-Za-z0-9_\s]+)\]", x0)
x0 = x0.replace('(','[')
x0 = x0.replace(')',']')
x2 = re.sub(r"\[([A-Za-z0-9_\s+]+)\]", '', x0)
if "<=>" in x2:
x3 = x2.split("<=>")
elif "<->" in x2: #bigg database format
x3 = x2.split("<->")
else:
x3 = x2.split("-->")
x3 = [x.strip() for x in x3]
x1=pd.unique(x1).tolist() #remove the duplicated
if '+' in x3[0]:
x30=x3[0].split('+')
else:
x30=x3[0]
x30=[x.strip() for x in x30]
x30 = [x for x in x30 if x != '']
if '+' in x3[1]:
x31 = x3[1].split('+')
else:
x31=x3[1]
x31 = [x.strip() for x in x31]
x31 = [x for x in x31 if x != '']
if set(x30) == set(x31):
subsysem[i] ='Transport' + '['+', '.join(x1)+']'
print(subsysem[i])
elif set(x30)-set(['ATP','H2O']) == set(x31) - set(['ADP','phosphate','H']):
subsysem[i] = 'Transport' + '[' + ', '.join(x1) + ']'
print(subsysem[i])
else:
subsysem[i] = subsysem[i]
return subsysem
def findRemoveRxnBasedOnGene(rxnRemovedGene, rxnAllGene):
'''this function is used to remove rxn based on the removed gene list
if the all genes in a reaction were in the removed gene list, then this reaction was removed'''
#x0 = gem_dataframe['removed_gene'].tolist()
#y0 = gem_dataframe['all_gene'].tolist()
x0=rxnRemovedGene.tolist()
y0=rxnAllGene.tolist()
removed_rxn = list()
for x,y in zip(x0,y0):
if x is None:
removed_rxn.append('NO')
else:
if len(x) ==len(y):
removed_rxn.append('YES')
else:
removed_rxn.append('NO')
return removed_rxn
def saveExcel(infile, outfile):
'''
function to save the dataframe into xlsx format
:param infile:
:param outfile:
:return:
'''
    writer = pd.ExcelWriter(outfile)
import streamlit as st
import pandas as pd
import database as db
import numpy as np
from modeling import allocation
# setup & connection with database
connection = db.Connection()
data_salas1 = connection.query("SELECT * FROM cenario1_salas")
data_salas1 = pd.DataFrame(data_salas1, columns=['id_sala','numero_cadeiras','acessivel','qualidade'] )
data_turmas1 = connection.query("SELECT * FROM cenario1_turmas")
data_turmas1 = pd.DataFrame(data_turmas1, columns=['disciplina','professor','dias_horario','numero_alunos','curso','período','acessibilidade','qualidade'] )
data_salas2 = connection.query("SELECT * FROM cenario2_salas")
data_salas2 = pd.DataFrame(data_salas2, columns=['id_sala','numero_cadeiras','acessivel','qualidade'] )
data_turmas2 = connection.query("SELECT * FROM cenario2_turmas")
data_turmas2 = pd.DataFrame(data_turmas2, columns=['disciplina','professor','dias_horario','numero_alunos','curso','período','acessibilidade','qualidade'] )
"""
Created on Oct 7, 2013
@author: mmendez
"""
import pandas as pd
import os
def group(config_file, group_and_comparisons, group_id):
header = []
genes_counts = []
group = [group
for group in group_and_comparisons['group_definitions']
if group_id == group['id']][0]
for dataset in config_file['datasets']:
df = pd.read_csv(dataset['summary'], sep='\t')
df = df[(df.robustness == 10) & (df.accuracy > .9) & ((df.cl1 == group_id) | (df.cl2 == group_id))]
header.append([dataset['name'], df.id.value_counts().size])
gene_count = list(df.gene.value_counts().iteritems())
genes_counts.append(gene_count)
df = pd.DataFrame(genes_counts).T
group = {'id': group['id'],
'name': group['name'],
'print_id': group['print_id'],
'print_name': group['print_name'],
'header': header,
'rows': df.applymap(lambda x: ('', '') if pd.isnull(x) else x).values.tolist(),
}
return group
def group_list(config_file, group_and_comparisons):
datasets = []
for dataset in config_file['datasets']:
        df = pd.read_csv(dataset['summary'], sep='\t')
from natsort import natsorted
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
def load_TargetScan(gene):
# Load TargetScan predictions for gene
return set(open("data/TargetScan_{}.txt".format(gene)).read().strip().split("\n"))
def load_paired_tables(path):
# Open isomiR / mRNA TCGA tables and select paired samples
df_isomiR = pd.read_csv("{}/isomiR_normal.tsv".format(path), sep="\t", index_col=0)
df_mRNA = pd.read_csv("{}/mRNA_normal.tsv".format(path), sep="\t", index_col=0)
paired_samples = natsorted(list(set(df_isomiR.columns).intersection(df_mRNA.columns)))
df_isomiR = df_isomiR[paired_samples]
df_mRNA = df_mRNA[paired_samples]
return df_isomiR, df_mRNA
def load_tables_for_organ(organ):
# Merge all TCGA samples for specified organ
global organs_projects, isomiR_thr_quantile
dfs_isomiR, dfs_mRNA = [], []
for project in organs_projects[organ]:
df_isomiR, df_mRNA = load_paired_tables("data/TCGA/{}".format(project))
# Select highly expressed isomiR's
medians = df_isomiR.median(axis=1)
isomiR_thr = np.quantile(medians[medians > 0], isomiR_thr_quantile)
df_isomiR = df_isomiR.loc[ df_isomiR.median(axis=1) >= isomiR_thr ]
dfs_isomiR.append(df_isomiR)
dfs_mRNA.append(df_mRNA)
common_isomiRs = set.intersection(*[set(df_isomiR.index) for df_isomiR in dfs_isomiR])
dfs_isomiR = [df_isomiR.loc[common_isomiRs] for df_isomiR in dfs_isomiR]
df_isomiR = pd.concat(dfs_isomiR, axis=1)
df_mRNA = pd.concat(dfs_mRNA, axis=1)
return df_isomiR, df_mRNA
def show_gene_in_organs(gene, ax, lab):
# Draw boxplot and export statistical summary to the table (S1 Table)
matrix = []
for organ in organs_projects:
df_isomiR, df_mRNA = load_tables_for_organ(organ)
for e in df_mRNA.loc[gene].tolist():
matrix.append([organ, e])
coln = gene + ", log(FPKM-UQ)"
df = pd.DataFrame(matrix, columns=["Organ", coln])
medians = df.groupby("Organ").median()
order = medians.sort_values(coln, ascending=False).index
cmap = sns.color_palette("Blues")
table = [["Organ", "Minimum", "Q1", "Q2", "Q3", "Maximum", "Mean", "Standard deviation"]]
for o in order:
x = df.loc[df.Organ == o][coln]
table.append([o, np.min(x), np.quantile(x, 0.25), np.quantile(x, 0.5), np.quantile(x, 0.75), np.max(x), np.mean(x), np.std(x)])
print("\n".join(["\t".join(list(map(str, row))) for row in table]), file=open("tables/S1_{}.tsv".format(gene), "w"))
#mpl.rcParams["figure.figsize"] = 4, 5
p = sns.boxplot(x="Organ", y=coln, data=df, order=order, color=cmap[3], saturation=1, ax=ax)
p.set_xticklabels(p.get_xticklabels(), rotation=45, ha="right")
p.set_xlabel("")
ax.set_title(lab, loc="left", fontdict={"fontsize": "xx-large", "fontweight": "bold"})
def get_regulators_of_genes(genes):
# Correlation analysis on miRNA -> gene interactions
global R_thr
# Load TargetScan and the list of intronic sense miRNAs
db_regulators = {gene: load_TargetScan(gene) for gene in genes}
    df = pd.read_csv("data/intronic_sense_miRNA.tsv", sep="\t", header=None)
"""
Code to manage results of many simulations together.
"""
import pandas as pd
from tctx.networks.turtle_data import DEFAULT_ACT_BINS
from tctx.util import sim
import os.path
import logging
from pathlib import Path
import json
from tqdm.auto import tqdm as pbar
import datetime
import h5py
import numpy as np
import re
BASE_FOLDER = Path('/gpfs/gjor/personal/riquelmej/dev/tctx/data/interim')
# TODO find a way to get rid of these
LIST_LIKE_COLS = [
r'.*forced_times.*',
r'.*input_targeted_times.*',
r'.*input_pulsepacket_pulse_times.*',
r'.*voltage_measure.*',
r'foll_gids',
]
def get_col_names(name):
"""
We store results for each simulation in indexed files.
Every simulation will indicate its results with a path and an idx property.
Returns the pair of column names for the given type of results.
Eg: "spikes" -> ("spikes_path", "spikes_idx")
"""
return f'{name}_path', f'{name}_idx'
def _get_multi_store_cols(store_names):
"""return a list of columns that represent the given store names"""
import itertools
return list(itertools.chain(*[get_col_names(name) for name in store_names]))
def _hdf5_table_exists(path, key) -> bool:
"""
Check if table was saved and it's not empty
"""
with h5py.File(path, 'r') as f:
if key in f.keys():
# Empty DataFrames store an axis0 and axis1 of length 1, but no data blocks.
# This is very tied into pytables implementation, but it saves us having to load the dataframe.
return len(f[key].keys()) > 2
else:
return False
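# example usage (illustrative; the path and key below are hypothetical placeholders):
# _hdf5_table_exists('sims/batch_0.h5', 'spikes') -> True only if a non-empty table was stored under that key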
class CatMangler:
"""
    Categories are nice to work with interactively but are a pain to save to HDF5,
    so by default we save and load the data as ints.
    This object makes it easy to convert between the two representations.
"""
def __init__(self):
self.category_types = {
'layer': pd.CategoricalDtype(categories=['L1', 'L2', 'L3'], ordered=False),
            'con_type': pd.CategoricalDtype(categories=['e2e', 'e2i', 'i2e', 'i2i'], ordered=False)
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
dtype = {
'b': CategoricalDtype([pd.Timestamp("2014")])
}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_timedelta(self):
dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
data = "b\n1H\n2H\n3H"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_unexpected_categories(self):
dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = pd.DataFrame({"b": Categorical(list('dacd'),
dtype=dtype['b'])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ['a', 'b', 'c']
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'],
categories=cats)}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'],
categories=cats)},
index=[2, 3])]
dtype = CategoricalDtype(cats)
actuals = self.read_csv(StringIO(data), dtype={'b': dtype},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
# see gh-9424
expected = pd.concat([Series([], name='one', dtype='u1'),
                              Series([], name='one.1', dtype='f')
#!/usr/bin/env python
# coding: utf-8
# In[14]:
import xgboost as xgb
import numpy as np
import pandas as pd
train = pd.read_csv("./pytrain.csv", index_col = 0)
test = pd.read_csv("./pytest.csv", index_col = 0)
# In[15]:
X = np.array(train.iloc[:, 0:311])
y = np.array(train['SalePrice'])
test = np.array(test)
# In[3]:
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
dtrain = xgb.DMatrix(X, label = y)
params = {"max_depth":2, "eta":0.1}
XGB = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
# In[5]:
XGB.loc[100:,["test-rmse-mean", "train-rmse-mean"]].plot()
# can find a test RMSE of about 0.125
# In[12]:
XGBOOST = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1) #the params were tuned using xgb.cv
XGBOOST.fit(X, y)
predict = np.expm1(XGBOOST.predict(test))
# In[10]:
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
predict = 1 + np.zeros(1459)
kf = KFold(n_splits=20,random_state=666,shuffle=True)
test_errors = []
for train_index, test_index in kf.split(X):
Xtrain, Xtest = X[train_index], X[test_index]
ytrain, ytest = y[train_index], y[test_index]
XGBOOST = make_pipeline(RobustScaler(), xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.125)) #the params were tuned using xgb.cv
XGBOOST.fit(Xtrain, ytrain)
pred = XGBOOST.predict(test)
predict = predict * pred
xpre = XGBOOST.predict(Xtest)
test_errors.append(np.square(xpre - ytest).mean() ** 0.5)
predict = np.expm1(predict ** (1/20))
print(np.mean(test_errors))
# In[17]:
from pandas.core.frame import DataFrame
result = DataFrame(predict)
"""
Contains utilities and functions that are commonly used in the figure creation files.
"""
import sys
from logging import basicConfig, INFO, info
from time import time
from string import ascii_lowercase
from matplotlib import gridspec, pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.cm as cm
from matplotlib import rcParams
import matplotlib.ticker as mticker
from scipy.stats import multivariate_normal
import seaborn as sns
import pandas as pds
import numpy as np
import svgutils.transform as st
from ..sampling import sampleSpec, cellPopulations
from valentbind import polyc, polyfc
import matplotlib
matplotlib.use('AGG')
fdir = './output/'
rcParams['pcolor.shading'] = 'auto'
rcParams['svg.fonttype'] = 'none'
LR, HR = 1.5, 6.5
def getSetup(figsize, gridd):
""" Establish figure set-up with subplots. """
sns.set(style="whitegrid", font_scale=0.7, color_codes=True, palette="colorblind", rc={"grid.linestyle": "dotted", "axes.linewidth": 0.6})
# Setup plotting space and grid
f = plt.figure(figsize=figsize, constrained_layout=True)
gs1 = gridspec.GridSpec(*gridd, figure=f)
# Get list of axis objects
ax = list()
for x in range(gridd[0] * gridd[1]):
ax.append(f.add_subplot(gs1[x]))
return (ax, f)
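# example usage (illustrative; the figure size and grid below are arbitrary):
# ax, f = getSetup((8, 4), (1, 2)) returns a list of 2 axes laid out on a 1x2 grid plus the figure object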
def subplotLabel(axs, indices=False):
""" Place subplot labels on figure. """
if not indices:
for ii, ax in enumerate(axs):
ax.text(-0.2, 1.25, ascii_lowercase[ii], transform=ax.transAxes, fontsize=16, fontweight="bold", va="top")
else:
for jj, index in enumerate(indices):
axs[index].text(-0.2, 1.25, ascii_lowercase[jj], transform=axs[index].transAxes, fontsize=16, fontweight="bold", va="top")
def setFontSize(ax, fsize, xsci=[], ysci=[], nolegend=[]):
for i, subax in enumerate(ax):
subax.tick_params(axis="x", labelsize=fsize - 1)
subax.tick_params(axis="y", labelsize=fsize - 1)
subax.set_xlabel(subax.get_xlabel(), fontsize=fsize)
subax.set_ylabel(subax.get_ylabel(), fontsize=fsize)
subax.set_title(subax.get_title(), fontsize=fsize)
if subax.get_legend() is not None and i not in nolegend:
subax.legend(prop={'size': fsize - 1})
if i in xsci:
subax.set_xscale('log')
if i in ysci:
subax.set_yscale('log')
def overlayCartoon(figFile, cartoonFile, x, y, scalee=1, scale_x=1, scale_y=1):
""" Add cartoon to a figure file. """
# Overlay Figure cartoons
template = st.fromfile(figFile)
cartoon = st.fromfile(cartoonFile).getroot()
cartoon.moveto(x, y, scale_x=scalee * scale_x, scale_y=scalee * scale_y)
template.append(cartoon)
template.save(figFile)
def genFigure():
basicConfig(format='%(levelname)s:%(message)s', level=INFO)
start = time()
nameOut = 'figure' + sys.argv[1]
exec('from .' + nameOut + ' import makeFigure', globals())
ff = makeFigure()
ff.savefig(fdir + nameOut + '.svg', dpi=ff.dpi, bbox_inches='tight', pad_inches=0)
if sys.argv[1] == '1':
# Overlay Figure 1 cartoon
overlayCartoon(fdir + 'figure1.svg',
'./selecv/graphics/figure_1a.svg', 10, 15, scalee=0.02, scale_x=0.45, scale_y=0.45)
overlayCartoon(fdir + 'figure1.svg',
'./selecv/graphics/figure_1b.svg', 0, 280, scalee=0.24, scale_x=1, scale_y=1)
if sys.argv[1] == '2':
overlayCartoon(fdir + 'figure2.svg',
'./selecv/graphics/figure_2a.svg', 10, 0, scalee=0.18, scale_x=1, scale_y=1)
if sys.argv[1] == '3':
overlayCartoon(fdir + 'figure3.svg',
'./selecv/graphics/figure_3a.svg', 30, 0, scalee=0.22, scale_x=1, scale_y=1)
if sys.argv[1] == '4':
overlayCartoon(fdir + 'figure4.svg',
'./selecv/graphics/figure_4a.svg', 10, 0, scalee=0.18, scale_x=1, scale_y=1)
info('%s is done after %s seconds.', nameOut, time() - start)
def sampleReceptors(df, nsample=100):
"""
Generate samples in each sample space
"""
Populations = df.Population.unique()
sampledf = pds.DataFrame(columns=["Population", "Receptor_1", "Receptor_2"])
for population in Populations:
populationdf = df[df["Population"] == population]
RtotMeans = np.array([populationdf.Receptor_1.to_numpy(), populationdf.Receptor_2.to_numpy()]).flatten()
RtotCovs = populationdf.Covariance_Matrix.to_numpy()[0]
pop = np.power(10.0, multivariate_normal.rvs(mean=RtotMeans, cov=RtotCovs, size=nsample))
popdf = pds.DataFrame({"Population": population, "Receptor_1": pop[:, 0], "Receptor_2": pop[:, 1]})
sampledf = sampledf.append(popdf)
return sampledf
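# example usage (illustrative sketch; assumes a dataframe with columns Population, Receptor_1,
# Receptor_2 and Covariance_Matrix, as used elsewhere in this module):
# sampledf = sampleReceptors(populationDF, nsample=100) draws 100 receptor samples per population
# (each sample is 10**(multivariate normal draw), i.e. log-normal receptor counts)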
def getFuncDict():
"""Directs key word to given function"""
FuncDict = {"Aff": affHeatMap,
"Valency": ValencyPlot,
"Mix": MixPlot}
return FuncDict
def popCompare(ax, popList, scanKey, Kav, L0=1e-9, KxStar=1e-10, f=1):
"""Takes in populations and parameters to scan over and creates line plot"""
funcDict = getFuncDict()
Title = popList[0] + " to " + popList[1]
for ii, pop in enumerate(popList):
if ii >= 2:
Title += "/" + pop
Title = Title + " binding ratio"
funcDict[scanKey](ax, popList, Kav, L0, KxStar, f, Title)
def affHeatMap(ax, names, Kav, L0, KxStar, f, Title, Cbar=True):
"Makes a heatmap comparing binding ratios of populations at a range of binding affinities"
npoints = 3
ticks = np.full([npoints], None)
affScan = np.logspace(Kav[0], Kav[1], npoints)
ticks[0], ticks[-1] = "${}$".format(int(10**(9 - Kav[0]))), "${}$".format(int(10**(9 - Kav[1])))
sampMeans = np.zeros(npoints)
    ratioDF = pds.DataFrame(columns=affScan, index=affScan)
import pandas as pd
import numpy as np
import glob, os
from collections import defaultdict
from copy import deepcopy
from tsfeatures import tsfeatures
from natsort import natsort_keygen, natsorted
from .analysis import evaluate_prediction_owa
from sklearn.preprocessing import LabelEncoder
PIETER_TEST = False
seas_dict = {'Hourly': {'seasonality': 24, 'input_size': 24,
'output_size': 48, 'freq': 'H',
'tail_inputs': 0},
'Daily': {'seasonality': 7, 'input_size': 7,
'output_size': 14, 'freq': 'D',
'tail_inputs': 0},
'Weekly': {'seasonality': 52, 'input_size': 52,
'output_size': 13, 'freq': 'W',
'tail_inputs': 0},
'Monthly': {'seasonality': 12, 'input_size': 12,
'output_size':18, 'freq': 'M',
'tail_inputs': 0},
'Quarterly': {'seasonality': 4, 'input_size': 4,
'output_size': 8, 'freq': 'Q',
'tail_inputs': 0},
'Yearly': {'seasonality': 1, 'input_size': 4,
'output_size': 6, 'freq': 'D',
'tail_inputs': 0}}
FREQ_TO_INT = {'H': 24, 'D': 1,
'M': 12, 'Q': 4,
'W':1, 'Y': 1}
def extract_meta_features(seas_dict, dataset_name, y_train_df, y_test_df):
tail_inputs = seas_dict[dataset_name]['tail_inputs']
if tail_inputs > 0:
y_train_df = y_train_df.groupby('unique_id').tail(tail_inputs)
meta_features = tsfeatures(y_train_df, FREQ_TO_INT[seas_dict[dataset_name]['freq']])
# Sort unique_ids naturally/alphanumerically so we can concatenate later
meta_features = meta_features.sort_values(
by="unique_id",
key=natsort_keygen()
)
#Drop all nan columns, make other nans = zeros
meta_features = meta_features.dropna(axis=1, how='all').fillna(0).add_prefix('mf_')
meta_features = meta_features.rename(columns={"mf_unique_id": "unique_id"})
#Repeat features for every point of forecast horizon
# (The same reference series' statistics were present for each point.)
meta_features = meta_features.loc[meta_features.index.repeat(seas_dict[dataset_name]['output_size'])]
y_test_df = pd.concat([y_test_df, meta_features.drop('unique_id', axis=1).reset_index(drop=True)], axis=1)
return y_train_df, y_test_df
def compute_model_errors(seas_dict, base_model_names, dataset_name, y_train_df, y_test_df):
print("Calculating model errors")
errors_train_df = pd.DataFrame({'unique_id': natsorted(y_test_df.unique_id.unique())}).set_index('unique_id')
for mdl in base_model_names:
if mdl != 'mdl_naive2':
train_set_mdl = y_test_df.rename(columns={mdl: "y_hat"})
predictions_df = train_set_mdl[['unique_id', 'y_hat', 'ds']]
model_owa, _, _ = evaluate_prediction_owa(predictions_df=predictions_df,
y_train_df=y_train_df,
y_test_df=train_set_mdl,
naive2_seasonality=seas_dict[dataset_name]['seasonality'],
return_averages=False)
errors_train_df['OWA_'+ mdl] = model_owa
errors_train_df = errors_train_df.loc[errors_train_df.index.repeat(seas_dict[dataset_name]['output_size'])]
y_test_df = pd.concat([y_test_df, errors_train_df.reset_index(drop=True)], axis=1)
return y_test_df
def m4_parser(dataset_name, data_directory, forecast_directory, load_existing_dataframes=True):
"""
Transform M4 data into a panel.
Parameters
----------
dataset_name: str
Frequency of the data. Example: 'Yearly'.
    data_directory: str
        Directory containing the raw M4 data (Train/, Test/ and M4-info.csv).
    forecast_directory: str
        Directory with the pre-computed base-model forecasts.
    load_existing_dataframes: bool
        If True, reuse previously preprocessed dataframes when they exist.
"""
# Load previously computed dataframes
if load_existing_dataframes and os.path.isfile(data_directory + "/Preprocessed/" + dataset_name + '_X_train_df.csv'):
X_train_df = pd.read_csv(data_directory + "/Preprocessed/" + dataset_name + '_X_train_df.csv')
y_train_df = pd.read_csv(data_directory + "/Preprocessed/" + dataset_name + '_y_train_df.csv')
X_test_df = pd.read_csv(data_directory + "/Preprocessed/" + dataset_name + '_X_test_df.csv')
y_test_df = pd.read_csv(data_directory + "/Preprocessed/" + dataset_name + '_y_test_df.csv')
return X_train_df, y_train_df, X_test_df, y_test_df
print("Processing data")
train_directory = data_directory + "/Train/"
test_directory = data_directory + "/Test/"
freq = seas_dict[dataset_name]['freq']
m4_info = pd.read_csv(data_directory+'/M4-info.csv', usecols=['M4id','category'])
m4_info = m4_info[m4_info['M4id'].str.startswith(dataset_name[0])].reset_index(drop=True)
# Train data
train_path='{}{}-train.csv'.format(train_directory, dataset_name)
train_df = pd.read_csv(train_path)
train_df = train_df.rename(columns={'V1':'unique_id'})
train_df = pd.wide_to_long(train_df, stubnames=["V"], i="unique_id", j="ds").reset_index()
train_df = train_df.rename(columns={'V':'y'})
train_df = train_df.dropna()
train_df['split'] = 'train'
train_df['ds'] = train_df['ds']-1
# Get len of series per unique_id
len_series = train_df.groupby('unique_id').agg({'ds': 'max'}).reset_index()
len_series.columns = ['unique_id', 'len_serie']
# Test data
test_path='{}{}-test.csv'.format(test_directory, dataset_name)
test_df = pd.read_csv(test_path)
test_df = test_df.rename(columns={'V1':'unique_id'})
test_df = pd.wide_to_long(test_df, stubnames=["V"], i="unique_id", j="ds").reset_index()
test_df = test_df.rename(columns={'V':'y'})
test_df = test_df.dropna()
test_df['split'] = 'test'
test_df = test_df.merge(len_series, on='unique_id')
test_df['ds'] = test_df['ds'] + test_df['len_serie'] - 1
test_df = test_df[['unique_id','ds','y','split']]
df = pd.concat((train_df,test_df)).reset_index(drop=True)
df = df.sort_values(by=['unique_id', 'ds'], key=natsort_keygen()).reset_index(drop=True)
# Create column with dates with freq of dataset
len_series = df.groupby('unique_id').agg({'ds': 'max'}).reset_index()
len_series = len_series.sort_values(by=['unique_id'], key=natsort_keygen())
dates = []
for i in range(len(len_series)):
len_serie = len_series.iloc[i,1]
        ranges = pd.date_range(start='1970/01/01', periods=len_serie, freq=freq)
'''
Python Client for generic Biothings API services
'''
from __future__ import print_function
import os
import sys
import platform
import time
import warnings
from itertools import islice
import requests
import logging
from .utils import str_types
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
try:
from pandas import DataFrame, json_normalize
df_avail = True
except ImportError:
df_avail = False
try:
import requests_cache
caching_avail = True
except ImportError:
caching_avail = False
__version__ = '0.2.5'
# setting up the logging logger
_DEBUG_ = logging.DEBUG
logger = logging.getLogger("biothings_client")
logger.setLevel(_DEBUG_)
# creating the handler to output to stdout
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(_DEBUG_)
# setting up the logging formatter
# this formatter contains time, but will use without time for now
# formatter = logging.Formatter("[%(levelname)s %(asctime)s %(name)s:%(lineno)s] - %(message)s ")
formatter = logging.Formatter("%(levelname)-8s [%(name)s:%(lineno)s] - %(message)s")
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
class ScanError(Exception):
# for errors in scan search type
pass
def alwayslist(value):
    '''If the input value is not a list/tuple type, return it as a single-value list.
Example:
>>> x = 'abc'
>>> for xx in alwayslist(x):
... print xx
>>> x = ['abc', 'def']
>>> for xx in alwayslist(x):
... print xx
'''
if isinstance(value, (list, tuple)):
return value
else:
return [value]
def safe_str(s, encoding='utf-8'):
    '''Perform proper encoding if the input is a unicode string.'''
try:
_s = str(s)
except UnicodeEncodeError:
_s = s.encode(encoding)
return _s
def list_itemcnt(li):
    '''Return the number of occurrences of each distinct item in the input list.'''
x = {}
for item in li:
if item in x:
x[item] += 1
else:
x[item] = 1
return [(i, x[i]) for i in x]
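# example (illustrative): list_itemcnt(['a', 'b', 'a']) -> [('a', 2), ('b', 1)]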
def iter_n(iterable, n, with_cnt=False):
'''
Iterate an iterator by chunks (of n)
if with_cnt is True, return (chunk, cnt) each time
'''
it = iter(iterable)
if with_cnt:
cnt = 0
while True:
chunk = tuple(islice(it, n))
if not chunk:
return
if with_cnt:
cnt += len(chunk)
yield (chunk, cnt)
else:
yield chunk
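# example (illustrative): list(iter_n(range(5), 2)) -> [(0, 1), (2, 3), (4,)]
# with counts: list(iter_n(range(5), 2, with_cnt=True)) -> [((0, 1), 2), ((2, 3), 4), ((4,), 5)]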
class BiothingClient(object):
'''This is the client for a biothing web service.'''
def __init__(self, url=None):
if url is None:
url = self._default_url
self.url = url
if self.url[-1] == '/':
self.url = self.url[:-1]
self.max_query = self._max_query
# delay and step attributes are for batch queries.
self.delay = self._delay # delay is ignored when requests made from cache.
self.step = self._step
self.scroll_size = self._scroll_size
# raise requests.exceptions.HTTPError for status_code > 400
# but not for 404 on getvariant
        # set to False to suppress the exceptions.
self.raise_for_status = True
self.default_user_agent = ("{package_header}/{client_version} ("
"python:{python_version} "
"requests:{requests_version}"
")").format(**{
'package_header': self._pkg_user_agent_header,
'client_version': __version__,
'python_version': platform.python_version(),
'requests_version': requests.__version__
})
self._cached = False
@staticmethod
def _dataframe(obj, dataframe, df_index=True):
'''Converts object to DataFrame (pandas)'''
if not df_avail:
# print("Error: pandas module must be installed for as_dataframe option.")
logger.error("Error: pandas module must be installed for as_dataframe option.")
return
# if dataframe not in ["by_source", "normal"]:
if dataframe not in [1, 2]:
raise ValueError(
"dataframe must be either 1 (using json_normalize) or 2 (using DataFrame.from_dict")
if 'hits' in obj:
if dataframe == 1:
                df = json_normalize(obj['hits'])
import pandas as pd
import numpy as np
import pytest
import unittest
import datetime
import sys
import context
from fastbt.utils import *
def equation(a,b,c,x,y):
return a*x**2 + b*y + c
def test_multiargs_simple():
seq = pd.Series([equation(1,2,3,4,y) for y in range(20, 30)]).sort_index()
seq.index = range(20,30)
constants = {'a':1, 'b':2, 'c':3, 'x':4}
variables = {'y': range(20, 30)}
par = multi_args(equation, constants=constants, variables=variables).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert (x,) == y
def test_multiargs_product():
seq = []
for x in range(0,10):
for y in range(10,15):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 10), range(10, 15)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 10), 'y': range(10,15)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_multiargs_max_limit():
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
assert len(par) == 1000
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
@pytest.mark.parametrize("maxLimit", [2000, 3000, 5000, 10000])
def test_multiargs_max_limit_adjust(maxLimit):
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True, maxLimit=maxLimit).sort_index()
assert len(par) == min(maxLimit, 5000)
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_tick():
assert tick(112.71) == 112.7
assert tick(112.73) == 112.75
assert tick(1054.85, tick_size=0.1) == 1054.8
assert tick(1054.851, tick_size=0.1) == 1054.9
assert tick(104.73, 1) == 105
assert tick(103.2856, 0.01) == 103.29
assert tick(0.007814, 0.001) == 0.008
assert tick(0.00003562, 0.000001) == 0.000036
assert tick(0.000035617, 0.00000002) == 0.00003562
def test_tick_series():
    s = pd.Series([100.43, 200.32, 300.32])
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 10:11:28 2019
@author: yazdsous
"""
import numpy as np
import pyodbc
import pandas as pd
import datetime
#conn = pyodbc.connect('Driver={SQL Server};'
# 'Server=DSQL23CAP;'
# 'Database=Regulatory_Untrusted;'
# 'Trusted_Connection=yes;')
conn0 = pyodbc.connect('Driver={SQL Server};'
'Server=Hosaka\Sqlp2;'
'Database=Eforms;'
'Trusted_Connection=yes;')
conn1 = pyodbc.connect('Driver={SQL Server};'
'Server=ndb-a1;'
'Database=Core;'
'Trusted_Connection=yes;')
###############################################################################
#This function accepts the filingid of the application + the pyodbc object,
#joins the Form and FormField tables from the Eforms DB (on FormId) and returns the
#corresponding dataframe with FormId, Name, FilingId, ASPFieldIdName, and
#ASPFieldIdValue
#Output: A tuple with (Dataframe, FormId)
###############################################################################
def formfields_by_filingId(filingid:str, conn0) -> pd.DataFrame:
query = "SELECT f.[FormId]\
,f.[AddedOn]\
,[Name]\
,[FilingId]\
,[ASPFieldIdName]\
,[ASPFieldIdValue]\
FROM [Eforms].[dbo].[Form] f\
JOIN [FormField] ff\
ON f.FormId = ff.FormId\
WHERE FilingId IS NOT NULL AND [FilingId] = \'{}\'\
ORDER BY ff.FormId DESC".format(filingid)
df_filingid = pd.read_sql(query,conn0)
return df_filingid, df_filingid.FormId[0]
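# example usage (illustrative; the filing id below is a made-up placeholder, not a real filing):
# df_form, form_id = formfields_by_filingId('A12345-1', conn0)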
###############################################################################
###############################################################################
#This function accepts the FormId of the application + the pyodbc object
#and extracts the contact information filled by the applicant.
#Output: A dataframe with the information corresponding to each contact type
#for the application (with the FormId passed as the argument)
###############################################################################
def contact_info(filingid:str, conn0) -> pd.DataFrame:
query = "SELECT [ContactId]\
,ct.Name Contact_Type\
,ct.ContactTypeId\
,[FormId]\
,[FirstName]\
,[LastName]\
,[Salutation]\
,[Title]\
,[Organization]\
,[Email]\
,Country.Name Country\
,Province.Name Province\
,[Address]\
,[City]\
,[PostalCode]\
,[PhoneNumber]\
,[PhoneExt]\
,[FaxNumber]\
FROM [Eforms].[dbo].[Contact] c\
JOIN ContactType ct\
ON c.ContactTypeId = ct.ContactTypeId\
JOIN Country\
ON Country.CountryId = c.CountryId\
JOIN Province\
ON Province.ProvinceId = c.ProvinceId WHERE FormId = (SELECT FormId FROM [Eforms].[dbo].[Form] WHERE FilingId = \'{}\')".format(filingid)
df_fid = pd.read_sql(query,conn0)
return df_fid
###############################################################################
###############################################################################
#Input: FilingId of the application + the pyodbc object
#Output: A dataframe with the information in CORE corresponding to the apps
#joins of CORE tables and filtering by the FilingId
###############################################################################
def rts_by_filingid(filingid:str, conn1) -> pd.DataFrame:
query = "SELECT f.[FileId], f.[FileNumber],f.[RecordsTitle],f.[RecordsDescription],\
a.[ActivityId],a.[EnglishTitle],a.[FrenchTitle],a.[Description] ActivityDescription,\
a.[ApplicationDate],a.[ReceivedDate],a.[ExpectedCompletionDate],a.[InternalProjectFlag],\
a.[StatusId],a.[CompletedDate],a.[DeactivationDate] ActivityDeactivationDate,\
a.[LegacyProjectXKey],a.[LetterForCommentFlag],a.[LetterForCommentExpireDate],\
a.[BusinessUnitId],a.[FederalLandId],a.[DecisionOnCompleteness],a.[EnglishProjectShortName],\
a.[FrenchProjectShortName],a.[FrenchDescription] FrenchDescriptionOfActivity,\
aa.[ActivityAttachmentId] ,aa.[LivelinkCompoundDocumentId],\
be.[BoardEventId] ,be.[NextTimeToBoard] ,be.[PurposeId],be.[Description],\
be.[PrimaryContactId],be.[SecondaryContactId] ,\
di.[DecisionItemId],di.[DecisionItemStatusId],di.[RegulatoryInstrumentNumber],\
di.[IssuedDate],di.[EffectiveDate],di.[ExpireDate],di.[SunsetDate],\
di.[IssuedToNonRegulatedCompanyFlag],di.[LetterOnly],di.[Comments],di.[AddedBy],\
di.[AddedOn],di.[ModifiedBy],di.[ModifiedOn],di.[WalkaroundFolderId],\
di.[BoardDecisionDate],di.[ReasonForCancelling],di.[GicApproval],di.[SentToMinisterDate],\
di.[MinisterToPrivyCouncilOfficeDate],di.[PrivyCouncilOfficeApprovalNumber],\
di.[PrivyCouncilOfficeApprovalDate],di.[RegulatoryOfficerAssignedId],di.[IsNGLLicence],\
di.[FrenchComments],fc.[FileCompanyId] ,fc.[CompanyId],\
c.[CompanyId] CompanyIdC,c.[CompanyCode] ,c.[LegalName],c.[DeactivationDate],c.[IsGroup1]\
FROM [File] f\
JOIN [Activity] a\
ON f.FileId = a.FileId\
JOIN [ActivityAttachment] aa\
ON a.ActivityId = aa.ActivityId\
FULL JOIN [BoardEvent] be\
ON be.ActivityId = a.ActivityId\
FULL JOIN [DecisionItem] di\
ON be.BoardEventId = di.BoardEventId\
JOIN [FileCompany] fc \
ON fc.FileId = a.FileId\
JOIN [Company] c\
ON c.CompanyId = fc.CompanyId\
WHERE aa.LivelinkCompoundDocumentId = \'{}\'".format(filingid)
df_filingid = pd.read_sql(query,conn1)
return df_filingid, df_filingid.shape[0]
###############################################################################
###############################################################################
#A DATAFRAME is passed as the argument to this function. The input dataframe is the
#output of function formfields_by_filingId(...)
#Output: Commodity type (one or multiple commodities, depending on the application)
#and whether it is an export or import; gas applications may be export, import,
#or both
###############################################################################
def application_type(df:pd.DataFrame) -> str:
try:
#GAS
app_name = df.Name[0]
df_fields = df.loc[:,['ASPFieldIdName','ASPFieldIdValue']]
if app_name == 's15ab_ShrtTrmNtrlGs_ImprtExprt':
gas_import = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ImportOrder','ASPFieldIdValue'].values[0]
gas_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ExportOrder','ASPFieldIdValue'].values[0]
if all(map((lambda value: value == 'True'), (gas_import,gas_export))):
return 'gas','gas_export_import'
elif gas_import == 'False' and gas_export == 'True':
return 'gas','gas_export'
elif gas_import == 'True' and gas_export == 'False':
return 'gas','gas_import'
#NGL
elif app_name == 's22_ShrtTrmNgl_Exprt':
propane_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s22_ShrtTrmNgl_Exprt_Athrztns_ProductType_Propane','ASPFieldIdValue'].values[0]
butanes_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s22_ShrtTrmNgl_Exprt_Athrztns_ProductType_Butanes','ASPFieldIdValue'].values[0]
if all(map((lambda value: value == 'True'), (propane_export,butanes_export))):
return 'ngl','propane_butanes_export'
elif propane_export == 'False' and butanes_export == 'True':
return 'ngl','butanes_export'
elif propane_export == 'True' and butanes_export == 'False':
return 'ngl','propane_export'
#OIL
elif app_name == 's28_ShrtTrmLghtHvCrdRfnd_Exprt':
light_heavy_crude_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s28_ShrtTrmLghtHvCrdRfnd_Exprt_Athrztns_HeavyCrude','ASPFieldIdValue'].values[0]
refined_products_export = df_fields.loc[df_fields['ASPFieldIdName'] == 'chkbx_s28_ShrtTrmLghtHvCrdRfnd_Exprt_Athrztns_RefinedProducts','ASPFieldIdValue'].values[0]
if all(map((lambda value: value == 'True'), (light_heavy_crude_export,refined_products_export))):
return 'oil','lightheavycrude_refinedproducts_export'
elif light_heavy_crude_export == 'False' and refined_products_export == 'True':
return 'oil','lightheavycrude_export'
elif light_heavy_crude_export == 'True' and refined_products_export == 'False':
return 'oil','refinedproducts_export'
elif app_name == 's28_ShrtTrmHvCrd_Exprt':
return 'oil','heavycrude_export'
else:
return 'this is not a gas, ngl, or oil order'
except ValueError:
return 'Value'
except TypeError:
return 'Type'
###############################################################################
# NOTE:
###############################################################################
#GasType -> 1 -> Natural Gas
#GasType -> 2 -> Natural Gas, in the form of Liquefied Natural Gas
#GasType -> 3 -> Natural Gas, in the form of Compressed Natural Gas
###############################################################################
#Input: Dataframe of form fields (output of formfields_by_filingId(...))
#Output: Commodity description in English and in French
###############################################################################
def comm_type_english_french(df:pd.DataFrame) -> list:
try:
if application_type(df)[0] == 'gas':
gas_en,gas_fr = str(),str()
gas_type = df.loc[df['ASPFieldIdName'] == 'rbl_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ExportOrder_GasType','ASPFieldIdValue'].values[0]
if gas_type == '2':
gas_en = 'natural gas, in the form of Liquefied Natural Gas'
gas_fr = 'gaz, sous la forme de gaz naturel liquéfié seulement'
elif gas_type == '3':
gas_en = 'natural gas, in the form of compressed natural gas'
gas_fr = 'gaz, sous la forme de gaz naturel comprimé'
return gas_en , gas_fr
if application_type(df)[0] == 'oil':
oil_en,oil_fr = str(),str()
oil_type = application_type(df)[1]
if oil_type == 'lightheavycrude_refinedproducts_export':
                oil_en = 'light and heavy crude oil and refined petroleum products'
oil_fr = 'pétrole brut léger et lourd et produits pétroliers raffinés'
elif oil_type == 'lightheavycrude_export':
oil_en = 'light and heavy crude oil'
oil_fr = 'pétrole brut léger et lourd'
elif oil_type == 'refinedproducts_export':
oil_en = 'refined petroleum products'
oil_fr = 'produits pétroliers raffinés'
elif oil_type == 'heavycrude_export':
oil_en = 'heavy crude oil'
oil_fr = 'pétrole brut lourd'
return oil_en , oil_fr
if application_type(df)[0] == 'ngl':
ngl_en,ngl_fr = str(),str()
return ngl_en , ngl_fr
else:
return ('other comms....')
exit
except ValueError:
return 'Value'
except TypeError:
return 'Type'
#**************************************************************************************************
# input: month of the year in English in full version
# output: French months
# This function converts English months to French
#**************************************************************************************************
def month_to_french(month):
fr_months = ['janvier','février','mars','avril','mai','juin','juillet','août','septembre','octobre','novembre','décembre']
switcher = {
"January": fr_months[0],
"February": fr_months[1],
"March": fr_months[2],
"April": fr_months[3],
"May": fr_months[4],
"June": fr_months[5],
"July": fr_months[6],
"August": fr_months[7],
"September": fr_months[8],
"October": fr_months[9],
"November": fr_months[10],
"December": fr_months[11],
}
# get() method of dictionary data type returns
# value of passed argument if it is present
# in dictionary otherwise second argument will
# be assigned as default value of passed argument
return switcher.get(month, "nothing")
#**************************************************************************************************
# input: Date in the form of XX Month(English) XXXX
# output: French version
# This function converts English date to French
#**************************************************************************************************
def date_french(date_en:str)-> str:
try:
return(date_en.split()[0]) + ' '+ month_to_french(date_en.split()[1]) + ' ' + str(date_en.split()[2])
except ValueError:
return 'Value'
except TypeError:
return 'Type'
except:
return 'Wrong date format'
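#Example (hedged sketch of expected behaviour, assuming the "DD Month YYYY" format):
#   month_to_french('September') -> 'septembre'
#   date_french('12 March 2020') -> '12 mars 2020'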
#**************************************************************************************************
#Skip the Weekends
#reference: https://stackoverflow.com/questions/12691551/add-n-business-days-to-a-given-date-ignoring-holidays-and-weekends-in-python/23352801
#**************************************************************************************************
def add_business_days(from_date, ndays):
business_days_to_add = abs(ndays)
current_date = from_date
sign = ndays/abs(ndays)
while business_days_to_add > 0:
current_date += datetime.timedelta(sign * 1)
weekday = current_date.weekday()
if weekday >= 5: # sunday = 6
continue
business_days_to_add -= 1
return current_date
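#Example (hedged sketch): 2020-01-03 is a Friday, so adding 2 business days
#skips the weekend and lands on Tuesday 2020-01-07:
#   add_business_days(datetime.date(2020, 1, 3), 2) -> datetime.date(2020, 1, 7)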
###############################################################################
#Input: index[0] of the output tuple of function formfields_by_filingId(...)
#Output: Order start and end date
###############################################################################
def commence_end_order_gas(ctype:str, df:pd.DataFrame) -> list:
export_order_commence_date = str()
export_order_termination_date = str()
import_order_commence_date =str()
import_order_termination_date = str()
export_order_commence_date_fr = str()
export_order_termination_date_fr = str()
import_order_commence_date_fr = str()
import_order_termination_date_fr = str()
dt = df.AddedOn[0].date()
application_date = dt.strftime("%d %B %Y")
try:
if ctype[0] == 'gas':
#For a period of two years less one day commencing upon approval of the Board
if df.loc[df['ASPFieldIdName'] == 'rbl_s15ab_ShrtTrmNtrlGs_ImprtExprt_Athrztns_ExportOrder_TimeFrame','ASPFieldIdValue'].values[0] == '1':
#commences the day after application received date
ex_order_commence_date = add_business_days(pd.to_datetime(application_date),2)
export_order_commence_date = ex_order_commence_date.strftime("%d %B %Y")
export_order_commence_date_fr = date_french(export_order_commence_date) if len(export_order_commence_date.split()) == 3 else 'NULL'
                ex_order_termination_date = ex_order_commence_date + pd.DateOffset(years=2)
#!/usr/bin/env python
# coding: utf-8
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import random
# Date and Time
# =============
print(datetime.datetime(2000, 1, 1))
print(datetime.datetime.strptime("2000/1/1", "%Y/%m/%d"))
print(datetime.datetime(2000, 1, 1, 0, 0).strftime("%Y%m%d"))
# to_datetime
# ===========
print(pd.to_datetime("4th of July"))
print(pd.to_datetime("13.01.2000"))
print(pd.to_datetime("7/8/2000"))
print(pd.to_datetime("7/8/2000", dayfirst=True))
print(issubclass(pd.Timestamp, datetime.datetime))
ts = pd.to_datetime(946684800000000000)
print(ts.year, ts.month, ts.day, ts.weekday())
index = [pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03")]
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts)
print(ts.index)
ts = pd.Series(np.random.randn(len(index)),
index=["2000-01-01", "2000-01-02", "2000-01-03"])
print(ts.index)
index = pd.to_datetime(["2000-01-01", "2000-01-02", "2000-01-03"])
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts.index)
print(pd.date_range(start="2000-01-01", periods=3, freq='H'))
print(pd.date_range(start="2000-01-01", periods=3, freq='T'))
print(pd.date_range(start="2000-01-01", periods=3, freq='S'))
print(pd.date_range(start="2000-01-01", periods=3, freq='B'))
print(pd.date_range(start="2000-01-01", periods=5, freq='1D1h1min10s'))
print(pd.date_range(start="2000-01-01", periods=5, freq='12BH'))
bh = pd.tseries.offsets.BusinessHour(start='07:00', end='22:00')
print(bh)
print(pd.date_range(start="2000-01-01", periods=5, freq=12 * bh))
print(pd.date_range(start="2000-01-01", periods=5, freq='W-FRI'))
print(pd.date_range(start="2000-01-01", periods=5, freq='WOM-2TUE'))
s = pd.date_range(start="2000-01-01", periods=10, freq='BAS-JAN')
t = pd.date_range(start="2000-01-01", periods=10, freq='A-FEB')
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 12 2016
@author: Hector
Class to load and store configurations
read from run, trajectory or validation files
"""
import os
import numpy as _np
import pandas as _pd
import natsort as _ns
import glob as _glob
def _hyp_split(x, listing):
param_value = x.strip().rstrip('"').replace("'", "").split('=')
pname = param_value[0].replace("__", "")
if pname not in listing:
listing.append(pname)
return param_value[1]
def _validate_config_columns(x, past_names):
split_str = x.split(':')
name_l0 = split_str[0]
name_l1 = split_str[-1]
if name_l1 in past_names and name_l1 != 'choice':
name_l1 = name_l1 + '_' + split_str[1]
past_names.append(name_l1)
return name_l0, name_l1
def _validate_choice_names(x):
split_str = x.split(':')
name = split_str[-1]
if name == 'choice':
return split_str[0]
elif(split_str[0] == 'regressor' or
split_str[0] == 'classifier' or split_str[0] == 'preprocessor'):
return name + '_' + split_str[1]
else:
return name
DEBUG = False
class ConfigReader:
def __init__(self, data_dir=None, dataset=None):
self.runs_df = None
self.bests_df = None
self.trajectories_df = None
self.dataset = dataset
self.data_dir = data_dir
self.full_config = False
def load_run_configs(self, data_dir=None, dataset=None,
preprocessor='no_preprocessing', full_config=False):
"""
Loads all configurations run by SMAC, with validation error response
:param data_dir: Directory of where SMAC files live
:param dataset: In this case, the dataset used to train the model
:param preprocessor: Preprocessing method used in the data. None means all
:param full_config: Whether to return also the configuration of the preprocessor,
imputation and one-hot-encoding
        :return: pandas.DataFrame with every performance value (training error) and the feed-forward neural network
        configurations run by SMAC
"""
if data_dir is None and self.data_dir is None:
raise ValueError('Location of information not given')
elif self.data_dir is not None:
data_dir = self.data_dir
if dataset is None:
if self.dataset is None:
raise ValueError('Dataset not given')
else:
dataset = self.dataset
run_filename = "runs_and_results-SHUTDOWN*"
state_seed = "state-run*"
if preprocessor == 'all':
scenario_dir = os.path.join(data_dir, dataset, '*', dataset, state_seed, run_filename)
elif preprocessor is not None:
scenario_dir = os.path.join(data_dir, dataset, preprocessor, dataset, state_seed, run_filename)
else:
scenario_dir = os.path.join(data_dir, dataset, state_seed, run_filename)
dirs = _ns.natsorted(_glob.glob(scenario_dir))
if len(dirs) == 0:
raise ValueError('No runs_and_results files found.')
seeds_names = ['runs_' + itseeds.split('state-run')[-1].split('/')[0] for itseeds in dirs]
all_runs = []
all_best = []
runs_by_seed = []
for fnames in dirs:
try:
run_res, best_run = self.load_run_by_file(fnames, full_config=full_config)
all_runs.append(run_res)
all_best.append(best_run)
runs_by_seed.append(run_res.shape[0])
except IndexError:
print('CRASH in: ' + os.path.split(fnames)[1])
# Treat each seed as independent runs
runs_all_df = _pd.concat(all_runs, axis=0)
runs_all_df = runs_all_df.reset_index().drop('index', axis=1)
# Try to convert to numeric type
runs_all_df = runs_all_df.apply(_pd.to_numeric, errors='ignore')
# Best config each run
best_all_df = _pd.concat(all_best, axis=1)
best_all_df = best_all_df.apply(_pd.to_numeric, errors='ignore')
best_all_df.columns = seeds_names
self.runs_df = runs_all_df.copy()
self.bests_df = best_all_df.T.copy()
return runs_all_df.copy(), best_all_df.T.copy()
@staticmethod
def load_run_by_file(fname, full_config=False):
"""
Loads one single configuration file run by SMAC, with validation error response
:param fname: filename to load
:param full_config: Whether to return also the configuration of the preprocessor,
imputation and one-hot-encoding
:return: pandas.DataFrame with configuration and validation error
"""
run_cols = ['config_id', 'response', 'runtime',
'smac_iter', 'cum_runtime', 'run_result']
try:
run_df = _pd.read_csv(fname, delimiter=",", usecols=[1, 3, 7, 11, 12, 13],
skipinitialspace=False,
header=None, skiprows=1)
except OSError:
raise OSError('file %s does not exist. Please check path' % fname)
run_df.columns = run_cols
run_df.sort_values(by='response', axis=0, ascending=False, na_position='first', inplace=True)
run_df.drop_duplicates('config_id', keep='last', inplace=True)
base_dir = os.path.dirname(fname)
        # TODO: Add checks for whether one is using a non runs_and_results file
config_run_match = fname.rsplit('runs_and_results-')[1].rsplit('.')[0]
config_filename = "paramstrings-" + config_run_match + "*"
if DEBUG:
print(config_filename)
try:
confname = _glob.glob(os.path.join(base_dir, config_filename))[0]
except IndexError:
raise IndexError("There is no parameter configuration file")
config_df = _pd.read_csv(confname, engine='python', delimiter=",|:\s", header=None)
# Get the values of configuration parameters
names = []
config_df.iloc[:, 1:] = config_df.iloc[:, 1:].apply(lambda x: x.apply(_hyp_split, args=(names,)))
        # Collect the names whose first ':'-separated part is not 'classifier' so they can be dropped later
        # list(map()) is needed because map() returns an iterator in python3
filtered_names = list(map(lambda X: X.split(':')[-1], filter(lambda Z: Z.split(':')[0] != 'classifier', names)))
classifier_names = list(map(lambda Y: Y.split(':')[-1], names))
# Name column and remove not-classifier parameters
config_df.columns = ['config_id'] + classifier_names
if not full_config:
# Delete classifier:choice parameter
configuration_df = config_df.drop(filtered_names, axis=1)
run_config_df = _pd.merge(run_df, configuration_df, on='config_id')
else:
run_config_df = _pd.merge(run_df, config_df, on='config_id')
# Filter configurations over the error to have a better fit
run_config_df = run_config_df[run_config_df['response'] > 0.0]
run_config_df = run_config_df[run_config_df['response'] < 1.0]
        best_config_response = run_config_df.loc[run_config_df['response'].idxmin()]
# run_config_df = run_config_df.query('response > 0 and response < 1.0')
return run_config_df.copy(), best_config_response.copy()
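    # Example (hedged sketch; the state-run path below is hypothetical):
    #   run_df, best = ConfigReader.load_run_by_file(
    #       'experiments/mnist/state-run1/runs_and_results-SHUTDOWN1.csv')
    #   run_df keeps one row per config_id (best response); best is the single lowest-error configuration.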
@staticmethod
def load_trajectory_by_file(fname, full_config=False):
"""
:param fname: filename to load
:param full_config: Whether to return also the configuration of the preprocessor, imputation and one-hot-encoding
:return: pandas.DataFrame with filtered columns
"""
traj_cols = ['cpu_time', 'performance', 'wallclock_time',
'incumbentID', 'autoconfig_time']
rm_quote = lambda z: z.strip('" ')
try:
traj_res = _pd.read_csv(fname, delimiter=",",
skipinitialspace=False, converters={5: rm_quote},
header=None, skiprows=1)
except OSError:
print('file %s does not exist. Please check path' % fname)
names = []
traj_res.iloc[:, 1] = _pd.to_numeric(traj_res.iloc[:, 1], errors='coerce')
# Get the values of configuration parameters
traj_res.iloc[:, 5:-1] = traj_res.iloc[:, 5:-1].apply(lambda x: x.apply(_hyp_split, args=(names,)))
        # TODO: Improve unnecessary column dropping using filter()
if full_config:
smac_cols = [tuple([a]+[b]) for a, b in zip(['smac']*len(traj_cols), traj_cols)]
from operator import itemgetter
full_parameter_names = map(lambda y: itemgetter(0, -1)(y.split(':')), names)
params_inx = _pd.MultiIndex.from_tuples(smac_cols + list(full_parameter_names) +
[('smac', 'expected')])
traj_res.columns = params_inx
traj_res.performance = _pd.to_numeric(traj_res['smac']['performance'], errors='coerce')
traj_res.sort_values(by=('smac', 'performance'), axis=0, ascending=False, na_position='first', inplace=True)
traj_res.drop_duplicates(('smac', 'incumbentID'), keep='last', inplace=True)
class_df = traj_res.drop(('smac', 'expected'), axis=1)
else:
classifier_names = list(map(_validate_choice_names, names))
traj_res.columns = traj_cols + classifier_names + ['expected']
# Drop duplicated configuration and leave the best X-validation error
traj_res.performance = _pd.to_numeric(traj_res['performance'], errors='coerce')
traj_res.sort_values(by='performance', axis=0, ascending=False, na_position='first', inplace=True)
traj_res.drop_duplicates('incumbentID', keep='last', inplace=True)
# Drop "unnecessary" columns
cols_to_drop = ['incumbentID', 'autoconfig_time', 'strategy',
'minimum_fraction', 'rescaling', 'expected']
class_df = traj_res.drop(cols_to_drop, axis=1)
return class_df.copy()
def load_trajectories(self, data_dir=None, dataset=None,
preprocessor=None, full_config=False):
"""
:param data_dir: Directory of where SMAC files live
:param dataset: Dataset used to train the model
:param preprocessor: Preprocessing method used in the data.
:param full_config: Whether to return also the configuration of the preprocessor, imputation and one-hot-encoding
        :return: pandas.DataFrame with the performance (training errors) and the feed-forward neural network configurations given
        by the detailed trajectory files
"""
if data_dir is None:
if self.data_dir is None:
raise ValueError('Location of experiments not given')
else:
data_dir = self.data_dir
if dataset is None:
if self.dataset is not None:
dataset = self.dataset
else:
raise ValueError('Dataset not given')
# Could be done with find-like method, but no
traj_filename = "detailed-traj-run-*.csv"
preprocessors_list = ["Densifier", "TruncatedSVD", "ExtraTreesPreprocessorClassification",
"FastICA", "FeatureAgglomeration", "KernelPCA", "RandomKitchenSinks",
"LibLinear_Preprocessor", "NoPreprocessing", "Nystroem", "PCA",
"PolynomialFeatures", "RandomTreesEmbedding", "SelectPercentileClassification",
"SelectRates"]
if preprocessor == 'all':
scenario_dir = [os.path.join(data_dir, dataset, p, dataset, traj_filename) for p in preprocessors_list]
dirs = []
[dirs.extend(_glob.glob(p)) for p in scenario_dir]
dirs = _ns.natsorted(dirs)
elif preprocessor is not None:
scenario_dir = os.path.join(data_dir, dataset, preprocessor, dataset, traj_filename)
dirs = _ns.natsorted(_glob.glob(scenario_dir))
else:
scenario_dir = os.path.join(data_dir, dataset, traj_filename)
dirs = _ns.natsorted(_glob.glob(scenario_dir))
if len(dirs) == 0:
raise ValueError("Not file found in %s" % scenario_dir)
seeds = ['seed_' + itseeds.split('-')[-1].split('.')[0] for itseeds in dirs]
all_trajs = []
runs_by_seed = []
for fnames in dirs:
try:
run_res = self.load_trajectory_by_file(fnames, full_config=full_config)
all_trajs.append(run_res)
runs_by_seed.append(run_res.shape[0])
except IndexError:
print('CRASH in: ' + os.path.split(fnames)[1])
if preprocessor != 'all':
trajectories_df = _pd.concat(all_trajs, axis=0, keys=seeds)
drop_col = 'level_1'
else:
trajectories_df = _pd.concat(all_trajs, axis=0)
drop_col = 'index'
if full_config:
trajectories_df = (trajectories_df.reset_index().
drop(drop_col, axis=1, level=0))
else:
trajectories_df = trajectories_df.reset_index().drop(drop_col, axis=1)
if preprocessor != 'all':
trajectories_df.rename(columns={'level_0': 'run'}, inplace=True)
# Try to convert to numeric type
trajectories_df = trajectories_df.apply(_pd.to_numeric, errors='ignore')
self.trajectories_df = trajectories_df.copy()
return trajectories_df.copy()
@staticmethod
def load_validation_by_file(fname, load_config=False):
traj_cols = ['time', 'train_performance', 'test_performance']
cols_to_load = [0, 1, 2]
if load_config:
cols_to_load += [4]
traj_cols += ['config_ID']
try:
traj_res = _pd.read_csv(fname, delimiter=",", usecols=cols_to_load,
header=None, skiprows=1)
except OSError:
print('file %s does not exist. Please check path' % fname)
smac_cols = zip(['smac']*len(traj_cols), traj_cols)
traj_res.columns = _pd.MultiIndex.from_tuples(smac_cols)
traj_res = traj_res.apply(_pd.to_numeric, errors='coerce')
if load_config:
config_name = fname.replace('Results', 'CallStrings')
if not os.path.isfile(config_name):
print("Configuration file does not exists. Returning trajectory only")
return traj_res.copy()
rm_quote = lambda z: z.strip('-').replace("'", "").replace("__", "")
try:
config_res = _pd.read_csv(config_name, delimiter=",", usecols=[0, 1], header=0,
skiprows=0, skipinitialspace=False,
converters={1: rm_quote})
config_res.columns = ['config_ID', 'configuration']
except OSError:
raise OSError('file %s does not exists. Please check path' % config_name)
configuration_series = config_res.configuration.str.split('\s-(?=[a-z])', expand=True)
all_configs = []
for _, row in configuration_series.iterrows():
all_configs.append(row.dropna().str.split(' ', expand=True).set_index(0).T)
from operator import itemgetter
configs_df = _pd.concat(all_configs).reset_index(drop=True)
configuration_cols = map(lambda X: itemgetter(0, -1)(X.split(':')), configs_df.columns.values)
configs_cols = _pd.MultiIndex.from_tuples([('smac', 'config_ID')] + configuration_cols)
clean_names = []
if not configs_cols.is_unique:
from functools import partial
parfunc = partial(_validate_config_columns, past_names=clean_names)
configuration_cols = map(parfunc, configs_df.columns.values)
            configs_cols = _pd.MultiIndex.from_tuples([('smac', 'config_ID')] + configuration_cols)
import os
import pickle
import sys
import joblib
import pandas as pd
import pytest
from dask.datasets import timeseries
pytest.importorskip("dask_ml")
def check_trained_model(c, model_name=None):
if model_name is None:
sql = """
SELECT * FROM PREDICT(
MODEL my_model,
SELECT x, y FROM timeseries
)
"""
else:
sql = f"""
SELECT * FROM PREDICT(
MODEL {model_name},
SELECT x, y FROM timeseries
)
"""
result_df = c.sql(sql).compute()
assert "target" in result_df.columns
assert len(result_df["target"]) > 0
@pytest.fixture()
def training_df(c):
df = timeseries(freq="1d").reset_index(drop=True)
c.create_table("timeseries", df, persist=True)
    return df
def test_training_and_prediction(c, training_df):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'sklearn.ensemble.GradientBoostingClassifier',
wrap_predict = True,
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
check_trained_model(c)
def test_clustering_and_prediction(c, training_df):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'dask_ml.cluster.KMeans'
) AS (
SELECT x, y
FROM timeseries
LIMIT 100
)
"""
)
check_trained_model(c)
def test_iterative_and_prediction(c, training_df):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'sklearn.linear_model.SGDClassifier',
wrap_fit = True,
target_column = 'target',
fit_kwargs = ( classes = ARRAY [0, 1] )
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
check_trained_model(c)
def test_show_models(c, training_df):
c.sql(
"""
CREATE MODEL my_model1 WITH (
model_class = 'sklearn.ensemble.GradientBoostingClassifier',
wrap_predict = True,
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
c.sql(
"""
CREATE MODEL my_model2 WITH (
model_class = 'dask_ml.cluster.KMeans'
) AS (
SELECT x, y
FROM timeseries
LIMIT 100
)
"""
)
c.sql(
"""
CREATE MODEL my_model3 WITH (
model_class = 'sklearn.linear_model.SGDClassifier',
wrap_fit = True,
target_column = 'target',
fit_kwargs = ( classes = ARRAY [0, 1] )
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
expected = pd.DataFrame(["my_model1", "my_model2", "my_model3"], columns=["Models"])
result: pd.DataFrame = c.sql("SHOW MODELS").compute()
# test
pd.testing.assert_frame_equal(expected, result)
def test_wrong_training_or_prediction(c, training_df):
with pytest.raises(KeyError):
c.sql(
"""
SELECT * FROM PREDICT(
MODEL my_model,
SELECT x, y FROM timeseries
)
"""
)
with pytest.raises(ValueError):
c.sql(
"""
CREATE MODEL my_model WITH (
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
with pytest.raises(ValueError):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'that.is.not.a.python.class',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
def test_correct_argument_passing(c, training_df):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target',
fit_kwargs = (
first_arg = 3,
second_arg = ARRAY [ 1, 2 ],
third_arg = MAP [ 'a', 1 ],
forth_arg = MULTISET [ 1, 1, 2, 3 ]
)
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
mocked_model, columns = c.schema[c.schema_name].models["my_model"]
assert list(columns) == ["x", "y"]
fit_function = mocked_model.fit
fit_function.assert_called_once()
call_kwargs = fit_function.call_args.kwargs
assert call_kwargs == dict(
first_arg=3, second_arg=[1, 2], third_arg={"a": 1}, forth_arg=set([1, 2, 3])
)
def test_replace_and_error(c, training_df):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
first_mock, _ = c.schema[c.schema_name].models["my_model"]
with pytest.raises(RuntimeError):
c.sql(
"""
CREATE MODEL my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
c.sql(
"""
CREATE MODEL IF NOT EXISTS my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
assert c.schema[c.schema_name].models["my_model"][0] == first_mock
c.sql(
"""
CREATE OR REPLACE MODEL my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
assert c.schema[c.schema_name].models["my_model"][0] != first_mock
second_mock, _ = c.schema[c.schema_name].models["my_model"]
c.sql("DROP MODEL my_model")
c.sql(
"""
CREATE MODEL IF NOT EXISTS my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
assert c.schema[c.schema_name].models["my_model"][0] != second_mock
def test_drop_model(c, training_df):
with pytest.raises(RuntimeError):
c.sql("DROP MODEL my_model")
c.sql("DROP MODEL IF EXISTS my_model")
c.sql(
"""
CREATE MODEL IF NOT EXISTS my_model WITH (
model_class = 'mock.MagicMock',
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
c.sql("DROP MODEL IF EXISTS my_model")
assert "my_model" not in c.schema[c.schema_name].models
def test_describe_model(c, training_df):
c.sql(
"""
CREATE MODEL ex_describe_model WITH (
model_class = 'sklearn.ensemble.GradientBoostingClassifier',
wrap_predict = True,
target_column = 'target'
) AS (
SELECT x, y, x*y > 0 AS target
FROM timeseries
LIMIT 100
)
"""
)
model, training_columns = c.schema[c.schema_name].models["ex_describe_model"]
expected_dict = model.get_params()
expected_dict["training_columns"] = training_columns.tolist()
# hack for converting model class into string
expected_series = (
pd.DataFrame.from_dict(expected_dict, orient="index", columns=["Params"])[
"Params"
]
.apply(lambda x: str(x))
.sort_index()
)
# test
result = (
c.sql("DESCRIBE MODEL ex_describe_model")
.compute()["Params"]
.apply(lambda x: str(x))
)
    pd.testing.assert_series_equal(expected_series, result)
#!/usr/bin/env python
import os
from collections import namedtuple
import numpy as np
import pandas as pd
from scipy.io import mmread
import igraph as ig
import schpf
import scipy.sparse as sp
from scipy.sparse import csr_matrix, coo_matrix
from scipy.stats import hypergeom
from scipy.spatial.distance import squareform
#from sklearn.decomposition.nmf import non_negative_factorization
#from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
import sklearn
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import umap
import loompy
from tqdm import tqdm
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] =42
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import gridspec
import plotly.express as px
import plotly.io as pio
import time
import itertools
import joblib
import glob
from copy import deepcopy
def mean_cellscore_fraction(cell_scores, ntop_factors=1):
""" Get number of cells with a percentage of their total scores
on a small number of factors
Parameters
----------
cell_scores : ndarray
(ncells, nfactors) array of cell scores
ntop_factors : int, optional (Default: 1)
        number of factors that can count towards dominance
Returns
-------
mean_cellscore_fraction : float
The mean fraction of cells' scores that are contained within
their top `ntop_factors` highest scoring factors
"""
totals = np.sum(cell_scores, axis=1)
ntop_scores = np.sort(cell_scores,axis=1)[:, -ntop_factors:]
domsum = np.sum(ntop_scores, axis=1)
domfrac = domsum/totals
return np.mean(domfrac)
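# Example (hedged sketch): two cells, three factors
#   scores = np.array([[5., 1., 0.],
#                      [2., 2., 4.]])
#   mean_cellscore_fraction(scores, ntop_factors=1) -> (5/6 + 4/8) / 2 ~= 0.667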
def mean_cellscore_fraction_list(cell_scores):
""" Make a list of the mean dominant fraction at all possible numbers
of ntop_factors
"""
return [mean_cellscore_fraction(cell_scores, i+1)
for i in range(cell_scores.shape[1])]
def max_pairwise(gene_scores, ntop=200, second_greatest=False):
""" Get the maximum pairwise overlap of top genes
Parameters
----------
gene_scores : ndarray
(ngenes, nfactors) array of gene scores
ntop : int (optional, default 200)
Number of top genes to consider in each factor
second_greatest : bool, optional
Return the second greatest pairwise overlap of top genes
Returns
-------
max_pairwise : int
The maximum pairwise overlap of the `ntop` highest scoring genes in
each factors
p : float
Hypergeometric p value of max_pairwise, where the number of genes is
the population size, `ntop` is the number of potential successes and
the number of draws, and max_pairwise is the number of successes.
"""
tops = np.argsort(gene_scores, axis=0)[-ntop:]
max_pairwise, last_max = 0, 0
for i in range(tops.shape[1]):
for j in range(tops.shape[1]):
if i >= j:
continue
overlap = len(np.intersect1d(tops[:,i], tops[:,j]))
if overlap > max_pairwise:
last_max = max_pairwise
max_pairwise = overlap
elif overlap > last_max:
last_max = overlap
overlap = last_max if second_greatest else max_pairwise
p = hypergeom.pmf(k=overlap, M=gene_scores.shape[0],
N=ntop, n=ntop) \
+ hypergeom.sf(k=overlap, M=gene_scores.shape[0],
N=ntop, n=ntop)
Overlap = namedtuple('Overlap', ['overlap', 'p'])
return Overlap(overlap, p)
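# Example (hedged sketch): 6 genes, 2 factors, ntop=3
#   gene_scores = np.array([[10, 1], [9, 2], [8, 10], [1, 9], [2, 8], [3, 3]])
#   max_pairwise(gene_scores, ntop=3) should give Overlap(overlap=1, p=0.95),
#   since only gene 2 is in the top 3 of both factors and
#   p = P[X >= 1] for X ~ Hypergeom(M=6, n=3, N=3) = 1 - 1/20.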
def max_pairwise_table(gene_scores, ntop_list=[50,100,150,200,250,300]):
""" Get the maximum pairwise overlap at
Parameters
----------
gene_scores : ndarray
(ngenes, nfactors) array of gene scores
ntop_list : list, optional
List of values of ntop to evaluate
Returns
-------
df : DataFrame
"""
max_overlap, p_max, max2_overlap, p_max2 = [],[],[],[]
for ntop in ntop_list:
o = max_pairwise(gene_scores, ntop, False)
max_overlap.append( o.overlap )
p_max.append( o.p )
o2 = max_pairwise(gene_scores, ntop, True)
max2_overlap.append( o2.overlap )
p_max2.append( o2.p )
df = pd.DataFrame({'ntop' : ntop_list, 'max_overlap' : max_overlap,
'p_max' : p_max, 'max2_overlap' : max2_overlap, 'p_max2' : p_max2})
return df
def split_coo_rows(X, split_indices):
"""Split a coo matrix into two
Parameters
----------
X : coo_matrix
Matrix to split into two by row
split_indices : ndarray
Indices to use for the split.
Returns
-------
a : coo_matrix
rows from X specified in split_indices
b : coo_matrix
rows from X *not* specified in split_indices
"""
a_indices = split_indices
b_indices = np.setdiff1d(np.arange(X.shape[0]), split_indices)
X_csr = X.tocsr()
a = X_csr[a_indices, :].tocoo()
b = X_csr[b_indices, :].tocoo()
return a, b
def collapse_coo_rows(coo):
"""Collapse the empty rows of a coo_matrix
Parameters
----------
coo : coo_matrix
Input coo_matrix which may have empty rows
Returns
-------
collapsed_coo : coo_matrix
coo with row indices adjusted to removed empty rows
collapsed_indices : ndarray
Indices of the returned rows in the original input matrix
"""
nz_idx = np.where(coo.getnnz(1) > 0)[0]
return coo.tocsr()[nz_idx].tocoo(), nz_idx
def insert_coo_rows(a, b, b_indices):
"""Insert rows from b into a at specified row indeces
Parameters
----------
a : sparse matrix
b : sparse matrix
b_indices : ndarray
Indices in final matrix where b's rows should be. np.max(`b_indices`)
must be a valid row index in the merged matrix with shape[0] =
a.shape[0] + b.shape[0]. Must me ordered and unique.
Returns
-------
ab :
coo_matrix with rows re-indexed to have rows from b
"""
# check arguments
if a.shape[1] != b.shape[1]:
msg = 'a.shape[1] must equal b.shape[1], received a with shape'
msg += ' {} and b with shape {}'.format(a.shape, b.shape)
raise ValueError(msg)
if np.max(b_indices) >= a.shape[0] + b.shape[0]:
msg = 'Invalid row indices {} for array with '.format(b_indices)
msg += 'a.shape[0] + b.shape[0] = {} '.format(a.shape[0])
msg += '+ {} = {}'.format(b.shape[0], a.shape[0]+b.shape[0])
raise ValueError(msg)
if not np.all(np.diff(b_indices) > 0):
msg = '`b_indices` must be ordered without repeats. Received '
msg += '{}'.format(b_indices)
raise ValueError(msg)
out_shape = (a.shape[0] + b.shape[0], a.shape[1])
a = a.tocsr()
b = b.tocsr()
a_row, b_row = 0, 0
data, indices, indptr = [], [], [0]
for ab_row in range(out_shape[0]):
if b_row < len(b_indices) and ab_row == b_indices[b_row]:
my_row = b[b_row, :]
b_row += 1
else:
my_row = a[a_row, :]
a_row += 1
data.append(my_row.data)
indices.append(my_row.indices)
indptr.append(indptr[-1] + my_row.indptr[1])
ab = csr_matrix(
(np.hstack(data), np.hstack(indices), np.array(indptr)),
out_shape).tocoo()
return ab
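# Example (hedged sketch): splitting rows out and re-inserting them at sorted
# indices reconstructs the original matrix:
#   X = coo_matrix(np.array([[1, 0], [0, 2], [3, 0], [0, 4]]))
#   a, b = split_coo_rows(X, np.array([1, 3]))          # a = rows 1,3 ; b = rows 0,2
#   insert_coo_rows(b, a, np.array([1, 3])).toarray()   # equals X.toarray()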
def minibatch_ix_generator(ncells, batchsize):
    assert ncells >= batchsize # allow equality for testing
ixs = np.arange(ncells)
np.random.shuffle(ixs)
start = 0
while True:
stop = start + batchsize
if stop > ncells:
stop = stop % ncells
res = np.hstack([ixs[start:ncells], ixs[0:stop]])
else:
res = ixs[start:stop]
start = stop % ncells # need mod for case where ncells=batchsize
yield res
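# Example (hedged sketch): endless shuffled minibatches with wrap-around
#   batches = minibatch_ix_generator(ncells=10, batchsize=4)
#   next(batches)   # e.g. array([7, 2, 9, 0]); the permutation is shuffled once,
#   next(batches)   # then cycled, wrapping back to the start when exhausted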
def get_param_dfs(model):
eta_shp = pd.Series(np.ravel(model.eta.vi_shape), name=model.name)
eta_rte = pd.Series(np.ravel(model.eta.vi_rate), name=model.name)
beta_shp = pd.DataFrame(model.beta.vi_shape.T)
beta_shp.index = model.name + ':' + (beta_shp.index + 1).astype(str)
beta_rte = pd.DataFrame(model.beta.vi_rate.T, index=beta_shp.index)
return eta_shp, eta_rte, beta_shp, beta_rte
def get_spectra(models):
eta_shp, eta_rte, beta_shp, beta_rte = zip(*[get_param_dfs(m) for m in models])
    return pd.concat(eta_shp, axis=1).T, pd.concat(eta_rte, axis=1)
#####################################################################################################
# PARETO was produced under the DOE Produced Water Application for Beneficial Reuse Environmental
# Impact and Treatment Optimization (PARETO), and is copyright (c) 2021 by the software owners: The
# Regents of the University of California, through Lawrence Berkeley National Laboratory, et al. All
# rights reserved.
#
# NOTICE. This Software was developed under funding from the U.S. Department of Energy and the
# U.S. Government consequently retains certain rights. As such, the U.S. Government has been granted
# for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license
# in the Software to reproduce, distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit other to do so.
#####################################################################################################
"""
Authors: PARETO Team
"""
from pareto.operational_water_management.operational_produced_water_optimization_model import (
ProdTank,
)
from pyomo.environ import Var
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
from enum import Enum
class PrintValues(Enum):
Detailed = 0
Nominal = 1
Essential = 2
def generate_report(model, is_print=[], fname=None):
"""
    This method identifies the type of model [strategic, operational], creates a printing list based on is_print,
    and creates a dictionary that contains headers for all the variables that will be included in an Excel report.
    IMPORTANT: If an indexed variable is added to or removed from a model, the printing lists and headers should be updated
    accordingly.
"""
# Printing model sets, parameters, constraints, variable values
printing_list = []
if model.type == "strategic":
if len(is_print) == 0:
printing_list = []
else:
# PrintValues.Detailed: Slacks values included, Same as "All"
if is_print[0].value == 0:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_F_PadStorageIn",
"v_F_ReuseDestination",
"v_X_Capacity",
"v_T_Capacity",
"v_F_Capacity",
"v_D_Capacity",
"v_F_DisposalDestination",
"v_F_PadStorageOut",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"v_C_Disposal",
"v_C_Reuse",
"v_L_Storage",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Treatment",
"vb_y_FLow",
"v_F_Overview",
"v_S_FracDemand",
"v_S_Production",
"v_S_Flowback",
"v_S_PipelineCapacity",
"v_S_StorageCapacity",
"v_S_DisposalCapacity",
"v_S_TreatmentCapacity",
"v_S_ReuseCapacity",
"v_Q",
]
# PrintValues.Nominal: Essential + Trucked water + Piped Water + Sourced water + vb_y_pipeline + vb_y_disposal + vb_y_storage + etc.
elif is_print[0].value == 1:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Flow",
"vb_y_Treatment",
"v_F_Overview",
]
# PrintValues.Essential: Just message about slacks, "Check detailed results", Overview, Economics, KPIs
elif is_print[0].value == 2:
printing_list = ["v_F_Overview"]
else:
raise Exception("Report {0} not supported".format(is_print))
headers = {
"v_F_Overview_dict": [("Variable Name", "Documentation", "Total")],
"v_F_Piped_dict": [("Origin", "destination", "Time", "Piped water")],
"v_C_Piped_dict": [("Origin", "Destination", "Time", "Cost piping")],
"v_F_Trucked_dict": [("Origin", "Destination", "Time", "Trucked water")],
"v_C_Trucked_dict": [("Origin", "Destination", "Time", "Cost trucking")],
"v_F_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Sourced water")
],
"v_C_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Cost sourced water")
],
"v_F_PadStorageIn_dict": [("Completion pad", "Time", "StorageIn")],
"v_F_PadStorageOut_dict": [("Completion pad", "Time", "StorageOut")],
"v_C_Disposal_dict": [("Disposal site", "Time", "Cost of disposal")],
"v_C_Treatment_dict": [("Treatment site", "Time", "Cost of Treatment")],
"v_C_Reuse_dict": [("Completion pad", "Time", "Cost of reuse")],
"v_C_Storage_dict": [("Storage Site", "Time", "Cost of Storage")],
"v_R_Storage_dict": [
("Storage Site", "Time", "Credit of Retrieving Produced Water")
],
"v_L_Storage_dict": [("Storage site", "Time", "Storage Levels")],
"v_L_PadStorage_dict": [("Completion pad", "Time", "Storage Levels")],
"vb_y_Pipeline_dict": [
("Origin", "Destination", "Pipeline Diameter", "Pipeline Installation")
],
"vb_y_Disposal_dict": [("Disposal Site", "Injection Capacity", "Disposal")],
"vb_y_Storage_dict": [
("Storage Site", "Storage Capacity", "Storage Expansion")
],
"vb_y_Flow_dict": [("Origin", "Destination", "Time", "Flow")],
"vb_y_Treatment_dict": [
("Treatment Site", "Treatment Capacity", "Treatment Expansion")
],
"v_D_Capacity_dict": [("Disposal Site", "Disposal Site Capacity")],
"v_T_Capacity_dict": [("Treatment Site", "Treatment Capacity")],
"v_X_Capacity_dict": [("Storage Site", "Storage Site Capacity")],
"v_F_Capacity_dict": [("Origin", "Destination", "Flow Capacity")],
"v_S_FracDemand_dict": [("Completion pad", "Time", "Slack FracDemand")],
"v_S_Production_dict": [("Production pad", "Time", "Slack Production")],
"v_S_Flowback_dict": [("Completion pad", "Time", "Slack Flowback")],
"v_S_PipelineCapacity_dict": [
("Origin", "Destination", "Slack Pipeline Capacity")
],
"v_S_StorageCapacity_dict": [("Storage site", "Slack Storage Capacity")],
"v_S_DisposalCapacity_dict": [("Storage site", "Slack Disposal Capacity")],
"v_S_TreatmentCapacity_dict": [
("Treatment site", "Slack Treatment Capacity")
],
"v_S_ReuseCapacity_dict": [("Reuse site", "Slack Reuse Capacity")],
"v_F_ReuseDestination_dict": [
("Completion Pad", "Time", "Total Deliveries to Completion Pad")
],
"v_F_DisposalDestination_dict": [
("Disposal Site", "Time", "Total Deliveries to Disposal Site")
],
"quality.v_Q_dict": [
("Location", "Water Component", "Time", "Water Quality")
],
"v_F_UnusedTreatedWater_dict": [
("Treatment site", "Time", "Treatment Waste Water")
],
"v_F_CompletionsWater_dict": [
("Pads", "Time", "Deliveries to completions not stored")
],
"v_F_CompletionsDestination_dict": [
("Pads", "Time", "Total deliveries to completions pads")
],
}
# Defining KPIs for strategic model
model.reuse_WaterKPI = Var(doc="Reuse Fraction Produced Water [%]")
if model.p_beta_TotalProd.value and model.v_F_TotalReused.value:
reuseWater_value = (
(model.v_F_TotalReused.value) / (model.p_beta_TotalProd.value) * 100
)
else:
reuseWater_value = 0
model.reuse_WaterKPI.value = reuseWater_value
model.disposal_WaterKPI = Var(doc="Disposal Fraction Produced Water [%]")
if model.v_F_TotalDisposed.value and model.p_beta_TotalProd.value:
disposalWater_value = (
(model.v_F_TotalDisposed.value) / (model.p_beta_TotalProd.value) * 100
)
else:
disposalWater_value = 0
model.disposal_WaterKPI.value = disposalWater_value
model.fresh_CompletionsDemandKPI = Var(
doc="Fresh Fraction Completions Demand [%]"
)
if model.v_F_TotalSourced.value and model.p_gamma_TotalDemand.value:
freshDemand_value = (
(model.v_F_TotalSourced.value) / (model.p_gamma_TotalDemand.value) * 100
)
else:
freshDemand_value = 0
model.fresh_CompletionsDemandKPI.value = freshDemand_value
model.reuse_CompletionsDemandKPI = Var(
doc="Reuse Fraction Completions Demand [%]"
)
if model.v_F_TotalReused.value and model.p_gamma_TotalDemand.value:
reuseDemand_value = (
(model.v_F_TotalReused.value) / (model.p_gamma_TotalDemand.value) * 100
)
else:
reuseDemand_value = 0
model.reuse_CompletionsDemandKPI.value = reuseDemand_value
elif model.type == "operational":
if len(is_print) == 0:
printing_list = []
else:
# PrintValues.Detailed: Slacks values included, Same as "All"
if is_print[0].value == 0:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_F_PadStorageIn",
"v_L_ProdTank",
"v_F_PadStorageOut",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"v_C_Disposal",
"v_C_Reuse",
"v_L_Storage",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Truck",
"v_F_Drain",
"v_B_Production",
"vb_y_FLow",
"v_F_Overview",
"v_L_PadStorage",
"v_C_Treatment",
"v_C_Storage",
"v_R_Storage",
"v_S_FracDemand",
"v_S_Production",
"v_S_Flowback",
"v_S_PipelineCapacity",
"v_S_StorageCapacity",
"v_S_DisposalCapacity",
"v_S_TreatmentCapacity",
"v_S_ReuseCapacity",
"v_D_Capacity",
"v_X_Capacity",
"v_F_Capacity",
]
# PrintValues.Nominal: Essential + Trucked water + Piped Water + Sourced water + vb_y_pipeline + vb_y_disposal + vb_y_storage
elif is_print[0].value == 1:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Flow",
"vb_y_Truck",
"vb_z_PadStorage",
"v_F_Overview",
]
# PrintValues.Essential: Just message about slacks, "Check detailed results", Overview, Economics, KPIs
elif is_print[0].value == 2:
printing_list = ["v_F_Overview"]
else:
raise Exception("Report {0} not supported".format(is_print))
headers = {
"v_F_Overview_dict": [("Variable Name", "Documentation", "Total")],
"v_F_Piped_dict": [("Origin", "Destination", "Time", "Piped water")],
"v_C_Piped_dict": [("Origin", "Destination", "Time", "Cost piping")],
"v_F_Trucked_dict": [("Origin", "Destination", "Time", "Trucked water")],
"v_C_Trucked_dict": [("Origin", "Destination", "Time", "Cost trucking")],
"v_F_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Sourced water")
],
"v_C_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Cost sourced water")
],
"v_F_PadStorageIn_dict": [("Completion pad", "Time", "StorageIn")],
"v_F_PadStorageOut_dict": [("Completion pad", "Time", "StorageOut")],
"v_C_Disposal_dict": [("Disposal site", "Time", "Cost of disposal")],
"v_C_Treatment_dict": [("Treatment site", "Time", "Cost of Treatment")],
"v_C_Reuse_dict": [("Completion pad", "Time", "Cost of reuse")],
"v_C_Storage_dict": [("Storage Site", "Time", "Cost of Storage")],
"v_R_Storage_dict": [
("Storage Site", "Time", "Credit of Retrieving Produced Water")
],
"v_L_Storage_dict": [("Storage site", "Time", "Storage Levels")],
"v_L_PadStorage_dict": [("Completion pad", "Time", "Storage Levels")],
"vb_y_Pipeline_dict": [
("Origin", "Destination", "Pipeline Diameter", "Pipeline Installation")
],
"vb_y_Disposal_dict": [("Disposal Site", "Injection Capacity", "Disposal")],
"vb_y_Storage_dict": [
("Storage Site", "Storage Capacity", "Storage Expansion")
],
"vb_z_PadStorage_dict": [("Completions Pad", "Time", "Storage Use")],
"vb_y_Flow_dict": [("Origin", "Destination", "Time", "Flow")],
"vb_y_Truck_dict": [("Origin", "Destination", "Time", "Truck")],
"v_D_Capacity_dict": [("Disposal Site", "Disposal Site Capacity")],
"v_X_Capacity_dict": [("Storage Site", "Storage Site Capacity")],
"v_F_Capacity_dict": [("Origin", "Destination", "Flow Capacity")],
"v_S_FracDemand_dict": [("Completion pad", "Time", "Slack FracDemand")],
"v_S_Production_dict": [("Production pad", "Time", "Slack Production")],
"v_S_Flowback_dict": [("Completion pad", "Time", "Slack Flowback")],
"v_S_PipelineCapacity_dict": [
("Origin", "Destination", "Slack Pipeline Capacity")
],
"v_S_StorageCapacity_dict": [("Storage site", "Slack Storage Capacity")],
"v_S_DisposalCapacity_dict": [("Storage site", "Slack Disposal Capacity")],
"v_S_TreatmentCapacity_dict": [
("Treatment site", "Slack Treatment Capacity")
],
"v_S_ReuseCapacity_dict": [("Reuse site", "Slack Reuse Capacity")],
"v_F_ReuseDestination_dict": [
("Completion Pad", "Time", "Total Deliveries to Completion Pad")
],
"v_F_DisposalDestination_dict": [
("Disposal Site", "Time", "Total Deliveries to Disposal Site")
],
"v_F_TreatmentDestination_dict": [
("Disposal Site", "Time", "Total Deliveries to Disposal Site")
],
"v_B_Production_dict": [
("Pads", "Time", "Produced Water For Transport From Pad")
],
"v_Q_dict": [("Location", "Water Component", "Time", "Water Quality")],
"v_F_UnusedTreatedWater_dict": [
("Treatment site", "Time", "Treatment Waste Water")
],
}
# Detect if the model has equalized or individual production tanks
if model.config.production_tanks == ProdTank.equalized:
headers.update(
{"v_L_ProdTank_dict": [("Pads", "Time", "Production Tank Water Level")]}
)
headers.update(
{
"v_F_Drain_dict": [
("Pads", "Time", "Produced Water Drained From Production Tank")
]
}
)
elif model.config.production_tanks == ProdTank.individual:
headers.update(
{
"v_L_ProdTank_dict": [
("Pads", "Tank", "Time", "Production Tank Water Level")
]
}
)
headers.update(
{
"v_F_Drain_dict": [
(
"Pads",
"Tank",
"Time",
"Produced Water Drained From Production Tank",
)
]
}
)
else:
raise Exception(
"Tank Type {0} is not supported".format(model.config.production_tanks)
)
else:
raise Exception("Model type {0} is not supported".format(model.type))
# Loop through all the variables in the model
for variable in model.component_objects(Var):
if variable._data is not None:
# Loop through the indices of a variable. "i" is a tuple of indices
for i in variable._data:
var_value = variable._data[i].value
if i is None:
# Create the overview report with variables that are not indexed, e.g.:
# total piped water, total trucked water, total fresh water, etc.
headers["v_F_Overview_dict"].append(
(variable.name, variable.doc, var_value)
)
                # if a variable contains only one index, then "i" is recognized as a string and not a tuple,
# in that case, "i" is redefined by adding a comma so that it becomes a tuple
elif i is not None and isinstance(i, str):
i = (i,)
if i is not None and var_value is not None and var_value > 0:
headers[str(variable.name) + "_dict"].append((*i, var_value))
if model.v_C_Slack.value is not None and model.v_C_Slack.value > 0:
print("!!!ATTENTION!!! One or several slack variables have been triggered!")
# Loop for printing information on the command prompt
for i in list(headers.items())[1:]:
dict_name = i[0][: -len("_dict")]
if dict_name in printing_list:
print("\n", "=" * 10, dict_name.upper(), "=" * 10)
print(i[1][0])
for j in i[1][1:]:
print("{0}{1} = {2}".format(dict_name, j[:-1], j[-1]))
# Loop for printing Overview Information on the command prompt
for i in list(headers.items())[:1]:
dict_name = i[0][: -len("_dict")]
if dict_name in printing_list:
print("\n", "=" * 10, dict_name.upper(), "=" * 10)
# print(i[1][1][0])
for j in i[1][1:]:
if not j[0]: # Conditional that checks if a blank line should be added
print()
elif not j[
1
]: # Conditional that checks if the header for a section should be added
print(j[0].upper())
else:
print("{0} = {1}".format(j[1], j[2]))
# Printing warning if "proprietary_data" is True
if len(printing_list) > 0 and model.proprietary_data is True:
print(
"\n**********************************************************************"
)
print(" WARNING: This report contains Proprietary Data ")
print("**********************************************************************")
    # Adding a footnote to each dictionary indicating if the report contains Proprietary Data
if model.proprietary_data is True:
for report in headers:
if len(headers[report]) > 1:
headers[report].append(("PROPRIETARY DATA",))
# Creating the Excel report
if fname is None:
fname = "PARETO_report.xlsx"
with pd.ExcelWriter(fname) as writer:
for i in headers:
df = pd.DataFrame(headers[i][1:], columns=headers[i][0])
df.fillna("")
df.to_excel(writer, sheet_name=i[: -len("_dict")], index=False, startrow=1)
return model, headers
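# Example usage (hedged sketch; assumes a solved PARETO model instance named strategic_model):
# model, results_dict = generate_report(
#     strategic_model, is_print=[PrintValues.Essential], fname="PARETO_report.xlsx"
# )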
def plot_sankey(input_data={}, args=None):
"""
    This method receives data in the form of 3 separate lists (origin, destination, value lists), the generate_report
    dictionary output format, or the get_data dictionary output format. It then places this data into 4 lists of unique
    elements and assigns an index to each element so that the lists correspond with each other through those indexes.
    These lists are then passed into the outlet_flow method, whose output is passed into the method that generates the
    sankey diagram.
"""
# Suppress SettingWithCopyWarning because of false positives
pd.options.mode.chained_assignment = None
label = []
check_list = ["source", "destination", "value"]
if all(x in input_data.keys() for x in check_list):
input_data["type_of_data"] = "Labels"
elif "pareto_var" in input_data.keys():
input_data["type_of_data"] = None
variable = input_data["pareto_var"]
else:
raise Exception(
"Input data is not valid. Either provide source, destination, value, or a pareto_var assigned to the key pareto_var"
)
# Taking in the lists and assigning them to list variables to be used in the method
if input_data["type_of_data"] == "Labels":
source = input_data["source"]
destination = input_data["destination"]
value = input_data["value"]
# Checking if a source and destination are the same and giving the destination a new name for uniqueness
for n in range(len(source)):
if source[n] == destination[n]:
destination[n] = "{0}{1}".format(destination[n], "_TILDE")
elif input_data["type_of_data"] is None and isinstance(variable, list):
source = []
destination = []
value = []
temp_variable = []
temp_variable.append(variable[0])
# Searching for the keyword "PROPRIETARY DATA"
if "PROPRIETARY DATA" in variable[-1]:
variable.pop()
# Deleting zero values
for i in variable[1:]:
if i[-1] > 0:
temp_variable.append(i)
variable = temp_variable
        # Call handle_time to handle user input for specific time_periods and check if the variable is indexed by time
variable_updated = handle_time(variable, input_data)
# Loop through dictionaries to be included in sankey diagrams
for i in variable_updated[1:]:
source.append(i[0]) # Add sources, values, and destinations to lists
value.append(i[-1])
if i[0] == i[1]:
destination.append(
"{0}{1}".format(i[1], "_TILDE")
) # Add onto duplicate names so that they can be given a unique index
else:
destination.append(i[1])
elif input_data["type_of_data"] is None and isinstance(variable, dict):
source = []
destination = []
value = []
formatted_list = []
temp_variable = {}
# Deleting zero values
for key, val in variable.items():
if val > 0:
temp_variable.update({key: val})
variable = temp_variable
# Calling handle_time method handles user input for specific time_periods and if the variable is indexed by time
variable_updated = handle_time(variable, input_data)
# Formatting data into a list of tuples
for v in variable_updated:
formatted_list.append((*v, variable_updated[v]))
if "PROPRIETARY DATA" in formatted_list[-1]:
formatted_list.pop()
# Adding sources, destinations, and values to respective lists from tuples
for i in formatted_list:
source.append(i[0])
value.append(i[-1])
if i[0] == i[1]:
destination.append(
"{0}{1}".format(i[1], "_TILDE")
) # Add onto duplicate names so that they can be given a unique index
else:
destination.append(i[1])
else:
raise Exception(
"Type of data {0} is not supported. Available options are Labels, get_data format, and generate_report format".format(
type(variable)
)
)
# Combine locations and cut out duplicates while maintaining same order
total_labels = source + destination
label = sorted(set(total_labels), key=total_labels.index)
for s in source:
for l in label:
if s == l:
s_index = label.index(l)
for n, k in enumerate(source):
if k == s:
source[n] = s_index
for d in destination:
for l in label:
if d == l:
d_index = label.index(l)
for m, j in enumerate(destination):
if j == d:
destination[m] = d_index
# Remove added string from affected names before passing them into sankey method
for t, x in enumerate(label):
if x.endswith("_TILDE"):
label[t] = x[: -len("_TILDE")]
sum_dict = {"source": source, "destination": destination, "value": value}
sum_df = pd.DataFrame(sum_dict)
# Finding duplicates in dataframe and dropping them
df_dup = sum_df[sum_df.duplicated(subset=["source", "destination"], keep=False)]
df_dup = df_dup.drop_duplicates(subset=["source", "destination"], keep="first")
# Looping through dataframe and summing the total values of each node and assigning it to its instance
for index, row in df_dup.iterrows():
new_value = 0
new_value = sum_df.loc[
(sum_df["source"] == row["source"])
& (sum_df["destination"] == row["destination"]),
"value",
].sum()
sum_df.at[index, "value"] = new_value
df_updated = sum_df.drop_duplicates(subset=["source", "destination"], keep="first")
source = df_updated["source"].to_list()
destination = df_updated["destination"].to_list()
value = df_updated["value"].to_list()
updated_label = outlet_flow(source, destination, label, value)
generate_sankey(source, destination, value, updated_label, args)
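# --- Example usage (illustrative only) ---
# A minimal sketch of calling plot_sankey with the "Labels" input format. The node names,
# flow values, and the args dictionary below are hypothetical, and the sketch assumes plotly
# is imported at the top of this module as in the functions above.
def _example_plot_sankey():
    input_data = {
        "source": ["PP01", "PP01", "CP01"],
        "destination": ["CP01", "N01", "N01"],
        "value": [100.0, 50.0, 75.0],
    }
    args = {"font_size": 15, "plot_title": "Produced Water Flows", "output_file": "flows.html"}
    plot_sankey(input_data, args)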
def handle_time(variable, input_data):
"""
The handle_time method checks if a variable is indexed by time and checks if a user
has passed in certain time periods they would like to use for the data. It then appends
those rows of data with the specified time value to a new list which is returned in the
plot_sankey method.
"""
# Checks the type of data that is passed in and if it is indexed by time
indexed_by_time = False
if isinstance(variable, list):
time_var = []
for i in variable[:1]:
i = [j.title() for j in i]
if "Time" in i:
indexed_by_time = True
if indexed_by_time == True:
if (
"time_period" in input_data.keys()
): # Checks if user passes in specific time periods they want used in the diagram, if none passed in then it returns original list
for y in variable[1:]:
if y[-2] in input_data["time_period"]:
time_var.append((y))
if len(time_var) == 0:
raise Exception(
"The time period the user provided does not exist in the data"
)
else:
return time_var
else:
return variable
else:
return variable
else:
time_var = {}
if "labels" in input_data:
for i in input_data["labels"]:
i = [j.title() for j in i]
if "Time" in i:
indexed_by_time = True
if indexed_by_time == True:
if (
"time_period" in input_data.keys()
): # Checks if user passes in specific time periods they want used in the diagram, if none passed in then it returns original dictionary
for key, y in variable.items():
if key[-1] in input_data["time_period"]:
time_var.update({key: y})
if len(time_var) == 0:
raise Exception(
"The time period the user provided does not exist in the data"
)
else:
return time_var
else:
return variable
else:
return variable
else:
raise Exception("User must provide labels when using Get_data format.")
def outlet_flow(source=[], destination=[], label=[], value=[]):
"""
The outlet_flow method receives source, destination, label, and value lists and
sums the total value for each label. This value is then added to the label string and
updated label lists so that it can be displayed on each node as "label:value". This updated label
list is output to be used in the generate_sankey method.
"""
# Loop through each list finding where labels match sources and destination and totaling/rounding their values to be used in updated label list
for x, l in enumerate(label):
output = 0
v_count = []
for s, g in enumerate(source):
if g == x:
v_count.append(s)
if len(v_count) == 0:
for d, h in enumerate(destination):
if h == x:
v_count.append(d)
for v in v_count:
output = output + float(value[v])
rounded_output = round(output, 0)
integer_output = int(rounded_output)
value_length = len(str(integer_output))
if value_length >= 4 and value_length <= 7:
integer_output = str(int(integer_output / 1000)) + "k"
elif value_length >= 8:
integer_output = str(int(integer_output / 1000000)) + "M"
label[x] = "{0}:{1}".format(l, integer_output)
return label
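# --- Example (illustrative only) ---
# Sketch of how outlet_flow rewrites labels: totals of 1,000 or more are abbreviated with "k"
# and totals of 10,000,000 or more with "M". The indices and values below are made up.
def _example_outlet_flow():
    labels = outlet_flow(
        source=[0, 0], destination=[1, 2], label=["N01", "N02", "N03"], value=[1500.0, 700.0]
    )
    # Expected form: ["N01:2k", "N02:1k", "N03:700"] (values summed, rounded, and abbreviated)
    return labels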
def generate_sankey(source=[], destination=[], value=[], label=[], args=None):
"""
This method receives the final lists for source, destination, value, and labels to be used
in generating the plotly sankey diagram. It also receives arguments that determine font size and
plot titles. It outputs the sankey diagram in an html format that is automatically opened
in a browser.
"""
format_checklist = ["jpg", "jpeg", "pdf", "png", "svg"]
    # Checking arguments and assigning appropriate values; any missing or None entry
    # falls back to a default so no variable is left undefined
    if args is None:
        args = {}
    font_size = args.get("font_size") or 20
    plot_title = args.get("plot_title") or "Sankey Diagram"
    figure_output = args.get("output_file") or "first_sankey.html"
# Creating links and nodes based on the passed in lists to be used as the data for generating the sankey diagram
link = dict(source=source, target=destination, value=value)
node = dict(label=label, pad=30, thickness=15, line=dict(color="black", width=0.5))
data = go.Sankey(link=link, node=node)
# Assigning sankey diagram to fig variable
fig = go.Figure(data)
# Updating the layout of the sankey and formatting based on user passed in arguments
fig.update_layout(
title_font_size=font_size * 2,
title_text=plot_title,
title_x=0.5,
font_size=font_size,
)
if ".html" in figure_output:
fig.write_html(figure_output, auto_open=False)
elif any(x in figure_output for x in format_checklist):
fig.write_image(figure_output, height=850, width=1800)
else:
exception_string = ""
for x in format_checklist:
exception_string = exception_string + ", " + x
raise Exception(
"The file format provided is not supported. Please use either html{}.".format(
exception_string
)
)
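# --- Example (illustrative only) ---
# Sketch of calling generate_sankey directly with pre-indexed lists; plot_sankey normally
# builds these lists for you. All values below are made up, and the sketch assumes plotly
# is imported at the top of this module.
def _example_generate_sankey():
    source = [0, 0]
    destination = [1, 2]
    value = [1500.0, 700.0]
    label = ["N01:2k", "N02:1k", "N03:700"]
    args = {"font_size": 15, "plot_title": "Example", "output_file": "example.html"}
    generate_sankey(source, destination, value, label, args)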
def plot_bars(input_data, args):
"""
This method creates a bar chart based on a user passed in dictionary or list that is created from the get_data or generate_report methods.
The dictionary or list is assigned to the key 'pareto_var' of the input_data dictionary and the method then determines the type of variable
and proceeds accordingly. These variables are checked if they are indexed by time, if true then an animated bar chart is created, if false then
a static bar chart is created. In addition to the input_data dictionary, another dictionary named 'args' is passed in containing arguments for customizing
the bar chart. The args dictionary keys are 'plot_title', 'y_axis', 'group_by', and 'labels' which is only required if the variable is of get_data format(dictionary).
The 'y_axis' key is optional and accepts the value 'log' which will take the logarithm of the y axis. If 'y_axis' is not passed in then the axis will default to linear.
The 'group_by' key accepts a value that is equal to a column name of the variable data, this will specify which column to use for the x axis. Finally, the 'labels'
key accepts a tuple of labels to be assigned to the get_data format(list) variable since no labels are provided from the get_data method.
"""
# Suppress SettingWithCopyWarning because of false positives
pd.options.mode.chained_assignment = None
y_range = []
tick_text = []
time_list = []
indexed_by_time = False
date_time = False
print_data = False
format_checklist = ["jpg", "jpeg", "pdf", "png", "svg"]
figure_output = ""
if "output_file" not in args.keys() or args["output_file"] is None:
figure_output = "first_bar.html"
else:
figure_output = args["output_file"]
# Check for variable data and throw exception if no data is provided
if "pareto_var" in input_data.keys():
variable = input_data["pareto_var"]
else:
raise Exception(
"Input data is not valid. Provide a pareto_var assigned to the key pareto_var"
)
# Give group_by and plot_title a value of None/"" if it is not provided
if "group_by" not in args.keys():
args["group_by"] = None
# Assign print_data to the user passed in value
if "print_data" in args.keys():
print_data = args["print_data"]
if args["plot_title"] == "" or args["group_by"] is None:
plot_title = ""
else:
plot_title = args["plot_title"]
# Check if log was passed in as an option for the y axis and create a boolean for it
if "y_axis" not in args.keys():
log_y = False
yaxis_type = "linear"
elif args["y_axis"] == "log":
log_y = True
yaxis_type = "log"
else:
raise Warning("Y axis type {} is not supported".format(args["y_axis"]))
# Check the type of variable passed in and assign labels/Check for time indexing
if isinstance(variable, list):
for i in variable[:1]:
i = [j.title() for j in i]
if args["group_by"] == "" or args["group_by"] is None:
x_title = i[0]
y_title = i[-1]
if "Time" in i:
indexed_by_time = True
time = "Time"
elif args["group_by"].title() in i: # add default group_by as first column
y_title = i[-1]
x_title = args["group_by"].title()
if "Time" in i:
indexed_by_time = True
time = "Time"
formatted_variable = variable[1:]
elif isinstance(variable, dict):
formatted_list = []
for v in variable:
formatted_list.append((*v, variable[v]))
if input_data["labels"] is not None:
for i in input_data["labels"]:
i = [j.title() for j in i]
x_title = i[0]
y_title = i[-1]
if "Time" in i:
indexed_by_time = True
time = "Time"
else:
raise Exception("User must provide labels when using Get_data format.")
formatted_variable = formatted_list
else:
raise Exception(
"Type of data {0} is not supported. Valid data formats are list and dictionary".format(
type(variable)
)
)
if indexed_by_time:
# Create dataframes for use in the method
df = pd.DataFrame(columns=i)
df_bar = df[[x_title, time, y_title]]
df_new = pd.DataFrame(formatted_variable, columns=i)
df_new = df_new.round(0)
df_modified = df_new[[x_title, time, y_title]]
char_checklist = ["/", "-"]
removed_char = ""
if any(x in df_modified[time][0] for x in char_checklist):
df_modified[time] = pd.to_datetime(df_modified[time]).dt.date
date_time = True
else:
removed_char = df_modified[time][0][:1]
df_modified[time] = df_modified[time].apply(lambda x: x.strip(removed_char))
df_modified[time] = df_modified[time].apply(lambda x: pd.to_numeric(x))
for d, y in df_modified.iterrows():
time_list.append(y[time])
time_loop = set(time_list)
time_loop = sorted(time_loop)
# Loop through time list and give any nodes without a value for that time a 0
for ind, x in df_modified.iterrows():
time_value = df_modified.loc[df_modified[x_title] == x[x_title], time]
for t in time_loop:
if t not in time_value.values:
df_modified.loc[len(df_modified.index)] = [x[x_title], t, 1e-10]
# Take the sums of flows from nodes to destinations that have the same time period and locations
df_dup = df_modified[df_modified.duplicated(subset=[x_title, time], keep=False)]
df_dup = df_dup.drop_duplicates(subset=[x_title, time], keep="first")
for index, row in df_dup.iterrows():
new_value = 0
new_value = df_modified.loc[
(df_modified[x_title] == row[x_title])
& (df_modified[time] == row[time]),
y_title,
].sum()
df_modified.at[index, y_title] = new_value
df_bar = df_modified.drop_duplicates(subset=[x_title, time], keep="first")
# Get all y values and then calculate the max for the y axis range
for a, b in df_bar.iterrows():
y_range.append(b[y_title])
tick_text.append(b[x_title])
for y, x in enumerate(y_range):
y_range[y] = float(x)
max_y = max(y_range)
# Sort by time and x values
df_time_sort = df_bar.sort_values(by=[time, x_title])
# If time is of type datetime, convert to string for figure processing
if date_time:
df_time_sort[time] = df_time_sort[time].apply(lambda x: str(x))
else:
df_time_sort[time] = df_time_sort[time].apply(
lambda x: removed_char + str(x)
)
# Create bar chart with provided data and parameters
fig = px.bar(
df_time_sort,
x=x_title,
y=y_title,
color=x_title,
animation_frame=time,
range_y=[1, max_y * 1.02],
title=plot_title,
log_y=log_y,
)
fig.update_layout(
font_color="#fff",
paper_bgcolor="#333",
plot_bgcolor="#ccc",
)
# Update animation settings
fig.layout.updatemenus[0].buttons[0].args[1]["frame"]["duration"] = 200
fig.layout.updatemenus[0].buttons[0].args[1]["frame"]["redraw"] = False
fig.layout.updatemenus[0].buttons[0].args[1]["transition"]["duration"] = 1000
fig.layout.updatemenus[0].buttons[0].args[1]["transition"]["easing"] = "linear"
if print_data:
# Printing dataframe that is used in bar chart
with pd.option_context(
"display.max_rows",
None,
"display.max_columns",
None,
"display.precision",
1,
):
print(df_time_sort)
# Write the figure to html format and open in the browser
if ".html" in figure_output:
fig.write_html(figure_output, auto_open=False, auto_play=False)
elif any(x in figure_output for x in format_checklist):
fig.write_image(figure_output, height=850, width=1800)
else:
exception_string = ""
for x in format_checklist:
exception_string = exception_string + ", " + x
raise Exception(
"The file format provided is not supported. Please use either html{}.".format(
exception_string
)
)
else:
# Create dataframe for use in the method
df_new = pd.DataFrame(formatted_variable, columns=i)
# Take the sums of flows from nodes to destinations that have the same locations
df_modified = df_new[df_new.duplicated(subset=[x_title], keep=False)]
for index, row in df_modified.iterrows():
new_value = 0
new_value = df_modified.loc[
df_modified[x_title] == row[x_title], y_title
].sum()
df_new.at[index, y_title] = new_value
df_new_updated = df_new.drop_duplicates(subset=[x_title], keep="first")
# Get all values and then calculate the max for the y axis range
for a, b in df_new_updated.iterrows():
y_range.append(b[y_title])
for y, x in enumerate(y_range):
y_range[y] = float(x)
max_y = max(y_range)
# Create bar chart with provided data and parameters
fig = px.bar(
df_new_updated,
x=x_title,
y=y_title,
range_y=[0, max_y * 1.02],
color=x_title,
title=plot_title,
text=y_title,
)
fig.update_layout(
font_color="#fff",
paper_bgcolor="#333",
plot_bgcolor="#ccc",
yaxis_type=yaxis_type,
)
if print_data:
# Printing dataframe that is used in bar chart
with pd.option_context(
"display.max_rows",
None,
"display.max_columns",
None,
"display.precision",
1,
):
print(df_new_updated)
if ".html" in figure_output:
fig.write_html(figure_output, auto_open=False)
elif any(x in figure_output for x in format_checklist):
fig.write_image(figure_output, height=850, width=1800)
else:
exception_string = ""
for x in format_checklist:
exception_string = exception_string + ", " + x
raise Exception(
"The file format provided is not supported. Please use either html{}.".format(
exception_string
)
)
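# --- Example usage (illustrative only) ---
# Sketch of calling plot_bars with a generate_report-style list that is indexed by time,
# which produces an animated bar chart. The rows and argument values are hypothetical.
def _example_plot_bars():
    variable = [
        ("Location", "Time", "Volume"),
        ("PP01", "T01", 120.0),
        ("PP01", "T02", 95.0),
        ("CP01", "T01", 60.0),
    ]
    args = {
        "plot_title": "Volume by Location",
        "group_by": "Location",
        "output_file": "volume_bars.html",
    }
    plot_bars({"pareto_var": variable}, args)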
def plot_scatter(input_data, args):
"""
The plot_scatter method creates a scatter plot based on two variables that are assigned to x and y,
and a dictionary of arguments including labels, size specifications, group by and chart title. The variables
that are passed in can be of type list (generate_report format) and of type dictionary (get_data format). Labels and
size arguments are then interpreted/assigned appropriately to create the scatter plots. This method will produce two different
kinds of scatter plots depending on if the variables are indexed by time or not. If they are indexed by time, an animated plot
will be created, if they are not indexed by time, a static plot will be created. Before the manipulation and framing of this data
is completed, missing values in the datasets are detected and mitigated by giving them a value of 0 if none is provided. The size argument
is then sorted out either by calculating the ratio that the user provided as 'y/x' or 'x/y', or taking the variable that was provided
for the size argument and assigning those size values to their respective rows. Once these data modifications are completed, the scatter plot
is created with the data and arguments that are provided.
"""
# Suppress SettingWithCopyWarning because of false positives
pd.options.mode.chained_assignment = None
y_range = []
x_range = []
time_list = []
category_list = []
indexed_by_time = False
provided_size = False
is_list = False
is_dict = False
s_variable = None
date_time = False
print_data = False
group_by_category = False
category_variable = None
size = "Size"
format_checklist = ["jpg", "jpeg", "pdf", "png", "svg"]
figure_output = ""
# Checks if output_file has been passed in as a user argument
if "output_file" not in args.keys() or args["output_file"] is None:
figure_output = "first_scatter_plot.html"
else:
figure_output = args["output_file"]
# Assigns boolean variable to True if labels have been provided in the arguments
check_list = ["labels_x", "labels_y"]
if all(x in input_data.keys() for x in check_list):
has_labels = True
else:
has_labels = False
# Assign print_data to the user passed in value
if "print_data" in args.keys():
print_data = args["print_data"]
# Checking for size argument and checking the type
if "size" in input_data.keys():
s_variable = input_data["size"]
provided_size = True
if isinstance(s_variable, list):
is_list = True
elif isinstance(s_variable, dict):
is_dict = True
# Check if group by category argument was provided
if "group_by_category" in args.keys():
if isinstance(args["group_by_category"], bool):
group_by_category = args["group_by_category"]
elif isinstance(args["group_by_category"], dict):
category_variable = args["group_by_category"]
for c in category_variable:
category_list.append((c, category_variable[c]))
df_category = pd.DataFrame(category_list, columns=["Node", "Category"])
elif isinstance(args["group_by_category"], list):
category_variable = args["group_by_category"][1:]
df_category = pd.DataFrame(category_variable, columns=["Node", "Category"])
else:
raise Exception(
'Invalid type for argument "group_by_category". Must be of type boolean, list variable or dictionary variable.'
)
variable_x = input_data["pareto_var_x"]
variable_y = input_data["pareto_var_y"]
if isinstance(variable_x, list) and isinstance(variable_y, list):
for i, g in zip(variable_x[:1], variable_y[:1]):
i = [j.title() for j in i]
g = [l.title() for l in g]
x_title = i[-1]
y_title = g[-1]
if args["group_by"] is not None:
col_1 = args["group_by"]
else:
col_1 = i[0]
if "Time" in i and "Time" in g:
indexed_by_time = True
time = "Time"
elif "Time" not in i and "Time" not in g:
indexed_by_time = False
else:
raise Exception(
"Cannot create scatter plot unless BOTH variables are/are not indexed by time"
)
formatted_variable_x = variable_x[1:]
formatted_variable_y = variable_y[1:]
# If size is provided in the form of a list, grab labels for size and check if indexed by time compared to x and y variables
if provided_size and is_list:
for s in s_variable[:1]:
                s = [j.title() for j in s]
s_title = s[-1]
if indexed_by_time and "Time" not in s:
raise Exception(
"Both x and y variables are indexed by time. Size variable must also be indexed by time to create scatter plot."
)
s_variable = s_variable[1:]
elif isinstance(variable_x, dict) and isinstance(variable_y, dict):
formatted_list_x = []
formatted_list_y = []
v_tuples = []
# Get a list of tuples which are the keys from both variables, Example => ('N01','N05','T01')
for t in variable_x:
v_tuples.append(t)
for u in variable_y:
v_tuples.append(u)
v_tuples = list(set(v_tuples))
# Use list of tuples to find any missing rows and assign value of 0
for tup in v_tuples:
if tup not in variable_x:
variable_x[tup] = 0
if tup not in variable_y:
variable_y[tup] = 0
for l, k in zip(variable_x, variable_y):
formatted_list_x.append((*l, variable_x[l]))
formatted_list_y.append((*k, variable_y[k]))
if has_labels:
for i in input_data["labels_x"]:
i = [j.title() for j in i]
x_title = i[-1]
for g in input_data["labels_y"]:
g = [r.title() for r in g]
y_title = g[-1]
if args["group_by"] is not None:
col_1 = args["group_by"]
else:
col_1 = i[0]
if "Time" in i and "Time" in g:
indexed_by_time = True
time = "Time"
elif "Time" not in i and "Time" not in g:
indexed_by_time = False
else:
raise Exception(
"Cannot create scatter plot unless BOTH variables are/are not indexed by time"
)
else:
raise Exception(
"User must provide labels for both x and y when using Get_data format."
)
formatted_variable_x = formatted_list_x
formatted_variable_y = formatted_list_y
# If size is provided in the form of a dictionary, grab labels for size and check if indexed by time compared to x and y variables
if provided_size and is_dict:
size_list = []
for v in s_variable:
size_list.append((*v, s_variable[v]))
s_variable = size_list
if "labels_size" in input_data.keys():
for s in input_data["labels_size"]:
s = [k.title() for k in s]
s_title = s[-1]
if indexed_by_time and "Time" not in s:
raise Exception(
"Both x and y variables are indexed by time. Size variable must also be indexed by time to create scatter plot."
)
else:
raise Exception("User must provide labels for the size variable ")
else:
raise Exception(
"Type of data {0} or {1} is not supported. Available options are list and dictionary. Both variables must be the same type of data.".format(
type(variable_x), type(variable_y)
)
)
if indexed_by_time:
# Creating dataframe based on the passed in variable and rounding the values
df_new_x = pd.DataFrame(formatted_variable_x, columns=i)
df_new_x = df_new_x.round(0)
df_modified_x = df_new_x[[col_1, time, x_title]]
df_new_y = pd.DataFrame(formatted_variable_y, columns=g)
df_new_y = df_new_y.round(0)
df_modified_y = df_new_y[[col_1, time, y_title]]
# Check if time period is in datetime format or in letter number format
char_checklist = ["/", "-"]
removed_char = ""
if any(x in df_modified_x[time][0] for x in char_checklist):
df_modified_x[time] = pd.to_datetime(df_modified_x[time]).dt.date
df_modified_y[time] = pd.to_datetime(df_modified_y[time]).dt.date
date_time = True
else:
removed_char = df_modified_x[time][0][:1]
df_modified_x[time] = df_modified_x[time].apply(
lambda x: x.strip(removed_char)
)
df_modified_y[time] = df_modified_y[time].apply(
lambda x: x.strip(removed_char)
)
df_modified_x[time] = df_modified_x[time].apply(lambda x: | pd.to_numeric(x) | pandas.to_numeric |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: Estimator.py
@time: 2019-09-05 15:14
"""
def data_split(df, mode='train_val', shuffle=False, stratify=None, random_state=14):
"""
:param df:
:param mode:
:param shuffle:
:param stratify:
:param random_state:
:return:
"""
from sklearn.model_selection import train_test_split
if df.shape[0] > 100000:
test_size = 0.05
elif df.shape[0] > 50000:
test_size = 0.075
elif df.shape[0] > 10000:
test_size = 0.1
else:
test_size = 0.25
print(f'sample sum: {df.shape[0]}, test_size: {test_size}')
if stratify is not None:
df_train, df_test, stratify, _ = train_test_split(df, stratify, test_size=test_size, shuffle=shuffle, stratify=stratify, random_state=random_state)
else:
df_train, df_test = train_test_split(df, test_size=test_size, shuffle=shuffle, stratify=stratify, random_state=random_state)
if mode == 'train_val':
return df_train, df_test
elif mode == 'train_val_test':
df_train, df_val = train_test_split(df_train, test_size=test_size, shuffle=shuffle, stratify=stratify, random_state=random_state)
return df_train, df_val, df_test
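# --- Example usage (illustrative only) ---
# A minimal sketch of splitting a toy DataFrame with data_split; the column names and values
# are hypothetical.
def _example_data_split():
    import pandas as pd
    df = pd.DataFrame({'x': range(100), 'y': [i % 2 for i in range(100)]})
    df_train, df_val = data_split(df, mode='train_val', shuffle=True, stratify=df['y'])
    return df_train.shape, df_val.shape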
class DataProcessor:
def __init__(self, features=None, labels=None, cat_cols=None, split=None, random_state=14):
self._features = features
self._labels = labels
self._cat_cols = cat_cols
from sklearn.model_selection import train_test_split
self._features_train, self._features_val, self._labels_train, self._labels_val = train_test_split(features, labels,
test_size=split, random_state=random_state,
stratify=labels)
def cv_input_fn(self):
return self._features, self._labels, self._cat_cols
def train_input_fn(self):
return self._features_train, self._labels_train, self._cat_cols
def eval_input_fn(self):
return self._features_val, self._labels_val, self._cat_cols
def test_input_fn(self, features, batch_size=None):
pass
class FastTextDataProcessor(DataProcessor):
def __init__(self, features=None, labels=None, split=0.1, random_state=14):
"""
        Note: the inputs here are raw short sentences that have not been word-segmented yet.
        :param features: Series
        :param labels: Series
:param split:
:param random_state:
"""
super().__init__(features=features, labels=labels, split=split, random_state=random_state)
def train_input_fn(self, dest_train_file_path='data_gen/trainset.txt'):
"""
:param dest_train_file_path:
:return:
"""
train_set = self._features_train.map(lambda x: ' '.join(list(x))) + ' __label__' + self._labels_train.map(
str) + '\n'
with open(dest_train_file_path, 'w', encoding='utf8') as f:
f.writelines(train_set.tolist())
return train_set
def eval_input_fn(self, dest_val_file_path='data_gen/valset.txt'):
val_set = self._features_val.map(lambda x: ' '.join(list(x))) + ' __label__' + self._labels_val.map(
str) + '\n'
with open(dest_val_file_path, 'w', encoding='utf8') as f:
f.writelines(val_set.tolist())
return val_set
@classmethod
def test_input_fn(self, features):
"""
:param features: Series
:return:
"""
return features.map(lambda x: ' '.join(list(x)))
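# --- Example usage (illustrative only) ---
# Sketch of preparing fastText train/validation files from a toy corpus of short Chinese
# reviews. The sentences, labels, and output paths are hypothetical, and the sketch assumes
# a data_gen/ directory exists. Each output line is space-separated characters followed by a
# __label__ suffix.
def _example_fasttext_data_processor():
    import pandas as pd
    features = pd.Series(['很好用', '质量太差', '物流很快', '包装破损', '非常满意', '不推荐', '性价比高', '体验糟糕'])
    labels = pd.Series([1, 0, 1, 0, 1, 0, 1, 0])
    processor = FastTextDataProcessor(features=features, labels=labels, split=0.25)
    processor.train_input_fn(dest_train_file_path='data_gen/trainset.txt')
    processor.eval_input_fn(dest_val_file_path='data_gen/valset.txt')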
class Estimator(object):
"""Base Estimator class.
    Reference: https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/estimator.py
"""
def __init__(self, params: dict=None, data_processor: DataProcessor=None):
"""
Constructs an `Estimator` instance.
:param params:
:param data_processor:
"""
self._params = params
self._data_processor = data_processor
def model_fn(self):
return ""
def train(self,
steps=None):
"""
:param steps: Number of steps for which to train the model.
:return:
"""
pass
def evaluate(self, steps=None):
"""
Evaluates the model given evaluation data `input_fn`.
For each step, calls `input_fn`, which returns one batch of data.
:param steps:
:param hooks:
:param checkpoint_path:
:param name:
:return:
"""
pass
def test(self,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
"""
Yields predictions for given features.
:param predict_keys:
:param hooks:
:param checkpoint_path:
:param yield_single_examples:
:return:
"""
pass
def baseline(self):
"""
        A model workflow using default parameters: [cv,] train, val, test
:return:
"""
pass
def save_model(self):
pass
def load_model(self):
pass
class ClsEstimator(Estimator):
"""Classifier Estimator class.
"""
def __init__(self, params: dict=None, data_processor: DataProcessor=None, model_path: str=None):
"""
Constructs an `Estimator` instance.
:param params:
:param data_processor:
        :param model_path: path to the model file
"""
self._params = params
self._data_processor = data_processor
self._model_path = model_path
def model_fn(self):
raise NotImplementedError()
def train(self,
steps=None):
"""
:param steps: Number of steps for which to train the model.
:return:
"""
raise NotImplementedError()
def evaluate(self, steps=None):
"""
Evaluates the model given evaluation data `input_fn`.
For each step, calls `input_fn`, which returns one batch of data.
:param steps:
:param hooks:
:param checkpoint_path:
:param name:
:return:
"""
raise NotImplementedError()
def train_eval(self):
"""
train & evaluate
:return:
"""
pass
def cross_val(self):
"""
:return:
"""
raise NotImplementedError()
def test(self,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
"""
Yields predictions for given features.
:param predict_keys:
:param hooks:
:param checkpoint_path:
:param yield_single_examples:
:return:
"""
raise NotImplementedError()
def explain(self):
"""模型解释"""
raise NotImplementedError()
class CatBoostClsEstimator(ClsEstimator):
def __init__(self, params=None, data_processor=None, model_path=None):
"""
:param params:
params = {
'iterations': iterations,
# learning_rate: 0.1,
# 'custom_metric': 'AUC',
'custom_metric': custom_metric,
'loss_function': 'CrossEntropy'
}
"""
super().__init__(params=params, data_processor=data_processor, model_path=model_path)
if self._model_path:
self.load_model(self._model_path)
else:
self._model = self.model_fn()
def model_fn(self):
from catboost import CatBoostClassifier
return CatBoostClassifier(**self._params) if self._params else CatBoostClassifier()
def train(self, plot=True, verbose=True, show_features_importance=True, init_model=None):
"""
:param plot:
:param verbose:
:param show_features_importance:
:param init_model:
CatBoost class or string, [default=None]
Continue training starting from the existing model.
If this parameter is a string, load initial model from the path specified by this string.
:return:
"""
from catboost import Pool
import pandas as pd
features, labels, cat_cols = self._data_processor.train_input_fn()
train_data = Pool(data=features,
label=labels,
cat_features=cat_cols)
self._model.fit(train_data, plot=plot, verbose=verbose)
if show_features_importance:
df_features_importance = pd.DataFrame({'name': self._model.feature_names_,
'value': self._model.feature_importances_})
df_features_importance = df_features_importance.sort_values('value', ascending=False)
df_features_importance.reset_index(drop=True, inplace=True)
print(df_features_importance.head(5))
import matplotlib.pyplot as plt
fea_ = df_features_importance.sort_values('value')[df_features_importance.value > 0].value
fea_name = df_features_importance.sort_values('value')[df_features_importance.value > 0].name
plt.figure(figsize=(10, 20))
plt.barh(fea_name, fea_, height=0.5)
plt.show()
return df_features_importance
def evaluate(self, df_val=None, y_val=None, cat_cols=None):
from sklearn.metrics import classification_report
from catboost import Pool
import pandas as pd
        if df_val is None:
df_val, y_val, cat_cols = self._data_processor.eval_input_fn()
test_data = Pool(data=df_val,
cat_features=cat_cols)
r = self._model.predict(test_data)
print(pd.Series(r).value_counts())
print(classification_report(y_val, r))
dfr = pd.DataFrame(y_val)
dfr.columns = ['true_label']
y_test_hat = self._model.predict_proba(test_data)[:, 1]
dfr['score'] = y_test_hat
dfr['predict_label'] = r
dfr = dfr.sort_values('score', ascending=False)
dfr['order'] = range(1, dfr.shape[0] + 1)
print(dfr[dfr.true_label == 1])
return dfr
def train_eval(self, plot=True, verbose=True, show_features_importance=True, init_model=None, use_best_model: bool=True, early_stopping_rounds: int=None):
"""
:param plot:
:param verbose:
:param show_features_importance:
:param init_model:
CatBoost class or string, [default=None]
Continue training starting from the existing model.
If this parameter is a string, load initial model from the path specified by this string.
:param use_best_model:
:param early_stopping_rounds:
:return:
"""
from catboost import Pool
import pandas as pd
features, labels, cat_cols = self._data_processor.train_input_fn()
train_data = Pool(data=features,
label=labels,
cat_features=cat_cols)
df_val, y_val, cat_cols = self._data_processor.eval_input_fn()
val_data = Pool(data=df_val, label=y_val, cat_features=cat_cols)
self._model.fit(train_data, eval_set=val_data, plot=plot, verbose=verbose)
if show_features_importance:
df_features_importance = pd.DataFrame({'name': self._model.feature_names_,
'value': self._model.feature_importances_})
df_features_importance = df_features_importance.sort_values('value', ascending=False)
df_features_importance.reset_index(drop=True, inplace=True)
print(df_features_importance.head(5))
import matplotlib.pyplot as plt
fea_ = df_features_importance.sort_values('value')[df_features_importance.value > 0].value
fea_name = df_features_importance.sort_values('value')[df_features_importance.value > 0].name
plt.figure(figsize=(10, 20))
plt.barh(fea_name, fea_, height=0.5)
plt.show()
return df_features_importance
def cross_val(self, nfold=3, shuffle=True, stratified=None, plot=True, partition_random_seed: int=14):
"""
:param nfold:
:param shuffle:
:param stratified:
:param plot:
:param partition_random_seed:
:return:
cv results : pandas.core.frame.DataFrame with cross-validation results
columns are: test-error-mean test-error-std train-error-mean train-error-std
"""
from catboost import Pool, cv
import numpy as np
features, labels, cat_cols = self._data_processor.cv_input_fn()
cv_data = Pool(data=features,
label=labels,
cat_features=cat_cols)
cv_result = cv(cv_data, self._params, nfold=nfold, shuffle=shuffle, stratified=stratified, plot=plot, partition_random_seed=partition_random_seed)
print('Best validation {} score: {:.2f}±{:.2f} on step {}'.format(
self._params['custom_metric'],
np.max(cv_result[f'test-{self._params["custom_metric"]}-mean']),
cv_result[f'test-{self._params["custom_metric"]}-std'][np.argmax(cv_result[f'test-{self._params["custom_metric"]}-mean'])],
np.argmax(cv_result[f'test-{self._params["custom_metric"]}-mean'])
))
print('Precise validation {} score: {}'.format(self._params['custom_metric'], np.max(cv_result[f'test-{self._params["custom_metric"]}-mean'])))
return cv_result
def baseline(self):
import numpy as np
cv_result = self.cross_val()
self._params.update({
'iterations': np.argmax(cv_result[f'test-{self._params["custom_metric"]}-mean']),
})
print(self._params)
df_features_importance = self.train_eval()
dfr = self.evaluate()
return df_features_importance, dfr
def test(self, df_test=None, cat_cols=None, explain=True):
from catboost import Pool
import pandas as pd
if df_test is None:
df_test, cat_cols = self._data_processor.test_input_fn()
test_data = Pool(data=df_test,
cat_features=cat_cols)
dfr = pd.DataFrame(df_test.index)
y_test_hat = self._model.predict_proba(test_data)[:, 1]
dfr['score'] = y_test_hat
dfr['test_label'] = self._model.predict(test_data)
s = dfr['test_label'].value_counts()
print(s)
print(f'su sample num:{s.loc[1] if 1 in s else 0}')
if explain:
rr = self.explain(df_test, cat_cols, dfr)
return dfr, rr
return dfr
def explain(self, df_test, cat_cols, dfr):
"""模型解释"""
from catboost import Pool
import pandas as pd
test_data = Pool(data=df_test, cat_features=cat_cols)
shap_values = self._model.get_feature_importance(test_data, type='ShapValues')
dfs = pd.DataFrame(shap_values[:, :-1], columns=df_test.columns, index=df_test.index)
dfs_T = dfs.T
ss = []
for i in range(dfs_T.shape[1]):
ss.append(dfs_T.iloc[:, i].copy().sort_values(ascending=False).iloc[:5])
count = 0
rr = []
for line in dfr[dfr.test_label == 1].itertuples():
rr.append({"id": line[0], "SCORE": round(line.score, 2),
"EXPLAIN": ','.join(
[f'{i[0]}:{round(i[1], 2)}' for i in list(zip(ss[count].index, ss[count].values))])})
count += 1
print(rr)
return rr
def save_model(self, model_path):
"""
:param model_path: 'catboost_model.dump'
:return:
"""
self._model.save_model(model_path)
def load_model(self, model_path):
self._model = self.model_fn().load_model(model_path)
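# --- Example usage (illustrative only) ---
# Sketch of wiring a DataProcessor into CatBoostClsEstimator and running the default baseline
# workflow (cross-validation, train/eval, evaluation). The parameter values are hypothetical,
# and the sketch assumes catboost is installed and that features / labels / cat_cols are
# already prepared.
def _example_catboost_workflow(features, labels, cat_cols):
    params = {
        'iterations': 500,
        'custom_metric': 'AUC',
        'loss_function': 'Logloss',
    }
    dp = DataProcessor(features=features, labels=labels, cat_cols=cat_cols, split=0.2)
    estimator = CatBoostClsEstimator(params=params, data_processor=dp)
    df_features_importance, dfr = estimator.baseline()
    estimator.save_model('catboost_model.dump')
    return df_features_importance, dfr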
class XGBoostClsEstimator(ClsEstimator):
def __init__(self, params=None, data_processor=None, model_path=None):
"""
:param params:
params = {
max_depth: 3,
learning_rate: 0.1,
n_estimators: 1000,
objective: 'binary:logistic',
booster: 'gbtree',
n_jobs: 1,
}
"""
super().__init__(params=params, data_processor=data_processor, model_path=model_path)
if self._model_path:
self.load_model(self._model_path)
else:
self._model = self.model_fn()
def model_fn(self):
from xgboost import XGBClassifier
return XGBClassifier(**self._params) if self._params else XGBClassifier()
    def train(self, plot=True, verbose=True, show_features_importance=True, init_model=None):
        """
        :param plot: kept for interface compatibility with CatBoostClsEstimator (unused by XGBoost)
        :param verbose:
        :param show_features_importance:
        :param init_model: kept for interface compatibility (unused by XGBoost)
        :return:
        """
        import pandas as pd
        features, labels, cat_cols = self._data_processor.train_input_fn()
        # XGBClassifier trains directly on the feature frame; categorical columns are assumed
        # to be numerically encoded upstream, so no catboost Pool is used here.
        self._model.fit(features, labels, verbose=verbose)
        if show_features_importance:
            df_features_importance = pd.DataFrame({'name': list(features.columns),
                                                   'value': self._model.feature_importances_})
            df_features_importance = df_features_importance.sort_values('value', ascending=False)
            df_features_importance.reset_index(drop=True, inplace=True)
            print(df_features_importance.head(5))
            import matplotlib.pyplot as plt
            fea_ = df_features_importance.sort_values('value')[df_features_importance.value > 0].value
            fea_name = df_features_importance.sort_values('value')[df_features_importance.value > 0].name
            plt.figure(figsize=(10, 20))
            plt.barh(fea_name, fea_, height=0.5)
            plt.show()
        return df_features_importance
    def evaluate(self, df_val=None, y_val=None, cat_cols=None):
        from sklearn.metrics import classification_report
        import pandas as pd
        if df_val is None:
            df_val, y_val, cat_cols = self._data_processor.eval_input_fn()
        # Predict directly on the validation frame (XGBoost does not use a catboost Pool)
        r = self._model.predict(df_val)
        print(pd.Series(r).value_counts())
        print(classification_report(y_val, r))
        dfr = pd.DataFrame(y_val)
import mysql.connector
import pandas as pd
import re
import numpy as np
import csv
class sqlData:
df = None
percent_critical = 0
percent_high = 0
percent_medium = 0
percent_low = 0
total = 0
def __init__(self):
self.sql_df = pd.DataFrame()
self.total = len(self.sql_df) * len(self.sql_df.columns)
def search_dicts(self, key, list_of_dicts):
for item in list_of_dicts:
if key in item.keys():
return item
def get_level(self, level, low, medium, high, critical, score, matched_vals):
if score == critical:
level = 'CRITICAL'
self.percent_critical += len(matched_vals)
if score >= high and score < critical:
level = 'HIGH'
self.percent_high += len(matched_vals)
if score >= medium and score < high:
level = 'MEDIUM'
self.percent_medium += len(matched_vals)
if score <= low:
level = 'LOW'
self.percent_low += len(matched_vals)
return level
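    def _example_get_level(self):
        # Illustrative only: the thresholds and score below are hypothetical. A score equal to
        # `critical` marks the column CRITICAL and adds the number of matched values to
        # percent_critical.
        return self.get_level('', low=2, medium=5, high=7, critical=10, score=10,
                              matched_vals=['123-45-6789'])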
#connect to database
def sqldb_to_df(self, db, scores, rules_dict):
host = db[0]
user = db[1]
# password = db[2]
password = ""
database = db[2]
table = db[3]
mydb = mysql.connector.connect(
host=host,
user=user,
password=password,
database=database
)
mycursor = mydb.cursor()
# access column information
mycursor.execute("SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'" + table + "'") # e.g. pi
col_schema_result = mycursor.fetchall() # fetchall() method fetches all rows from the last execute statement
col_count = 0
match_count = 0
for col_schema_line in col_schema_result:
#print(col_schema_line)
col_count += 1
            for rule in rules_dict:
if rule in col_schema_line:
#print(rule, "located at column ", col_count)
mycursor.execute("SELECT " + rule + " FROM " + table)
if match_count == 0:
data = {rule: pd.Series(mycursor.fetchall())}
else:
data[rule] = pd.Series(mycursor.fetchall())
match_count += 1
for key in data.keys():
for i in range(len(data[key])):
if len(data[key][i]) == 1:
data[key][i] = data[key][i][0]
string = str(data[key][i])
data[key][i] = string
self.sql_df = | pd.DataFrame(data) | pandas.DataFrame |
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFFitMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python model fitting script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
for line in sys.stdin.read().splitlines():
line = line.split(delimiter)
inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = pd.to_numeric(df['tot_cust_years'])
df['tot_children'] = | pd.to_numeric(df['tot_children']) | pandas.to_numeric |
import pathlib
import pandas as pd
from pathlib import Path
from my_module import compressor
from my_module import text_formatter as tf
import re
from sklearn.model_selection import train_test_split
from typing import List
def gen_model_resource(sentences: List[str], labels: List[str]):
x_train, x_test, y_train, y_test = train_test_split(
sentences, labels, test_size=0.1, stratify=labels, random_state=0)
pd.DataFrame({'y_train': y_train, 'x_train': x_train}
).to_csv('train_data.csv', index=False)
pd.DataFrame({'y_test': y_test, 'x_test': x_test}
).to_csv('test_data.csv', index=False)
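# --- Example usage (illustrative only) ---
# Sketch of writing stratified train/test CSVs from toy data; the (Japanese) sentences and
# fastText-style labels below are hypothetical, and the files are written to the current
# working directory.
def _example_gen_model_resource():
    sentences = ['とても良い', '最悪だった', '満足です', '二度と買わない'] * 5
    labels = ['__label__1', '__label__0', '__label__1', '__label__0'] * 5
    gen_model_resource(sentences, labels)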
def aggregate_by_rate(review_path: Path, freq: str = 'M') -> pd.DataFrame:
    '''
    Aggregates the number of reviews per rating over an arbitrary period (default: monthly).
    '''
# Read DataFrame.
df = pd.read_csv(review_path)
# Delete 'comments' colum.
df.drop(columns=['comments', 'votes'], inplace=True)
# Convert 'dates' colum into DateTime type
df['dates'] = pd.to_datetime(df['dates'])
# Set index to 'dates' coulum.
df.set_index('dates', inplace=True)
# Aggregate review by any period.
df = df.groupby(pd.Grouper(freq=freq))
# Generate template.
df_result = pd.DataFrame(columns=range(1, 6))
for period, group in df:
        # Count reviews per rating, sort the index ascending, and transpose rows and columns
group = group.apply(pd.value_counts).sort_index().T
# Rename Index to Period.
group = group.rename(index={'rates': period})
# Merge DataFrame.
df_result = pd.concat([df_result, group])
# Replace Nan in DataFrame with 0.
df_result.fillna(0, inplace=True)
# Formats DateTimeIndex to '%y-%m-%d'and converts it to String type.
df_result.index = df_result.index.strftime('%y-%m-%d')
# Insert total colum.
df_result[15] = df_result.sum(axis=1).astype('int64')
# Reduce memory usage for pandas DataFrame.
df_result = compressor.reduce_mem_usage(df_result)
# Return
return df_result
# Split into an arbitrary number of chunks
# print((np.array_split(df['rates'].values, 3)))
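# --- Example usage (illustrative only) ---
# Sketch of aggregating a formatted review CSV into monthly counts per rating. The file name
# is hypothetical and is assumed to contain the 'dates', 'comments', 'rates', and 'votes'
# columns produced by fmt_reviews.
def _example_aggregate_by_rate():
    counts = aggregate_by_rate(Path('freviews.csv'), freq='M')
    return counts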
def fmt_reviews(review_path: Path):
    '''
    Extracts the date, rating, and comment columns from the review CSV and writes them to a new CSV.
    '''
    # Set the output path for the formatted file
    formatted_file_path = review_path.with_name(f'f{review_path.name}')
    # Read the review CSV
    df = pd.read_csv(review_path)
    # Keep only the columns used for processing
    df = df[['dates', 'comments', 'rates', 'votes']]
    # Drop reviews written only in English
    pattern = r'[a-zA-Z0-9\W_]*$'
    repatter = re.compile(pattern)
    drop_index = df.index[df['comments'].str.match(repatter)]
    df.drop(drop_index, inplace=True)
    # Clean up the comments
    df['comments'] = df['comments'].apply(tf.fmt_comments)
    # Convert the dates to datetime format
    df['dates'] = df['dates'].apply(tf.convert_datetime_date)
    # Extract the numeric rating
    df['rates'] = df['rates'].apply(tf.extract_rate)
    # Extract the vote count
    df.fillna({'votes': 0}, inplace=True)
    df['votes'] = df['votes'].apply(tf.extract_vote).astype('int8')
    # Use the date as the index
    df.set_index('dates', inplace=True)
    df.to_csv(formatted_file_path)
def fmt_labeled_review(review_path: Path):
    # Output path for the labeled CSV
    labeled_file_path = review_path.with_name(
        f'{review_path.stem}_labeled.csv')
    # Read the CSV
    df = pd.read_csv(review_path)
    # Keep only the required columns
    df = df[['comments', 'rates']]
    # Attach fastText-style labels
    df['label'] = df['rates'].apply(lambda x: f'__label__{x}')
    df = df[['label', 'comments']]
    # Write to CSV
    df.to_csv(labeled_file_path, index=False)
def fmt_labeled_review2(review_path: Path):
    def add_label(rate: int):
        if rate == 4:
            return '__label__1'
        elif rate <= 2:
            return '__label__0'
    # Output path for the labeled CSV
    labeled_file_path = review_path.with_name(
        f'{review_path.stem}_labeled2.csv')
    # Read the CSV
    df = pd.read_csv(review_path)
    # Keep only the required columns
    df = df[['comments', 'rates']]
    df = df[df['rates'].isin([1, 2, 4])]
    # Attach fastText-style labels
    df['label'] = df['rates'].apply(add_label)
    df = df[['label', 'comments']]
    # Write to CSV
    df.to_csv(labeled_file_path, index=False)
def fmt_labeled_sent(sent_path: Path, boundary: float = 0.2):
    # Output path for the labeled CSV
    labeled_file_path = sent_path.with_name(
        f'{sent_path.stem}_labeled.csv')
    # Read the CSV
    df = pd.read_csv(sent_path)
    # Keep only the required columns
    df = df[['content', 'score']]
    # Drop rows with neutral polarity
    df = df[df['score'] != 0]
    # Drop rows whose absolute polarity score is below the boundary
    df = df[abs(df['score']) >= boundary]
    # Convert the score to a binary label (positive -> 1, negative -> 0)
    df['score'] = df['score'].apply(lambda x: 1 if x > 0 else 0)
    # Attach fastText-style labels
    df['label'] = df['score'].apply(lambda x: f'__label__{x}')
    # Keep only the output columns
    df = df[['label', 'content']]
    # Write to CSV
    df.to_csv(labeled_file_path, index=False)
def split_review_by_sent(path: Path):
df = | pd.read_csv(path) | pandas.read_csv |
#!/usr/bin/env python3
import os
import sys
import pickle as pkl
import pandas as pd
import shutil
import subprocess
from collections import defaultdict
import pkg_resources
from datetime import datetime
import tarfile
import gc
from camaptools.MLP import load_models
from camaptools.EnhancedFutures import EnhancedProcessPoolExecutor
from camaptools.utils import OUTPUT_FOLDER
class Peptides(object):
def __init__(self, genome, context=162, workers=0, executor=None):
self.output_folder = OUTPUT_FOLDER
self.genome = genome
assert 'GRCh' in genome or 'GRCm' in genome
self.out_dir = os.path.join(self.output_folder, 'allPeptides_%s' % self.genome)
self.species = 'Human' if 'GRCh' in genome else 'Mouse'
self.context = context
# detect pickle peptide files
pepfiles = [os.path.join(self.out_dir, f) for f in os.listdir(self.out_dir) if f[:8] == 'peptides']
self.pepfiles = pepfiles
self.workers = workers
self.executor = EnhancedProcessPoolExecutor if executor is None else executor
def annotate(self, overwrite=False, algorithms=None, parameters=None):
self.model_names, self.models = load_models(self.context, algorithms, parameters)
self.overwrite = overwrite
with self.executor(max_workers=self.workers, use_threads=True) as ex:
counts = sum(list(ex.map(self._annotate, self.pepfiles)))
pfile = os.path.join(self.out_dir, 'info.pkl')
info = pkl.load(open(pfile, 'rb'))
edited = False
if 'pyTorchVersion' not in info:
edited = True
info['pyTorchVersion'] = pkg_resources.get_distribution("torch").version
if 'CAMAPModels' not in info:
edited = True
info['modelANN'] = self.model_names
if 'numberPeptideContexts' not in info:
edited = True
info['numberPeptideContexts'] = counts
if edited:
info['date'] = datetime.now()
self._keep_a_copy(pfile)
pkl.dump(info, open(pfile, 'wb'))
def _annotate(self, pfile, chunk_size=10000):
"""
Assumes sequences are never modified, will always run the same number of models
on all sequence chunks.
Assumes context size is the same for all models.
"""
print(pfile)
models = self.models
def run_models():
counter_ix, s_list = seqs.keys(), seqs.values()
c, m, p = list(jobs)[0]
any_model = models[c][m][p][0]
s_embeds = any_model._get_embeds(s_list)
for c, m, p in jobs:
for i in counter_ix:
seq_scores[i][(m, c, p)] = []
for model in models[c][m][p]:
scores = model.get_score(s_embeds)
for i, s in zip(counter_ix, scores):
seq_scores[i][(m, c, p)].append(float(s[1]))
pepdict = pkl.load(open(pfile, 'rb'))
seq_scores = {}
seq_ids = {}
seqs = {}
jobs = set()
counter = 0
for pep, gene_dict in pepdict.items():
gene_dict = gene_dict['genes']
for gene_name, entries in gene_dict.items():
for entry_i, entry in enumerate(entries):
seq = entry['sequenceContext']
if '!GA' not in seq:
counter += 1
seq_ids[counter] = (pep, gene_name, entry_i)
seq_scores[counter] = {}
seqs[counter] = seq
if self.overwrite:
del entry['CAMAPScore']
for context in models:
for method in models[context]:
for params in models[context][method]:
if 'CAMAPScore' not in entry:
entry['CAMAPScore'] = {}
if method not in entry['CAMAPScore']:
entry['CAMAPScore'][method] = {}
if context not in entry['CAMAPScore'][method]:
entry['CAMAPScore'][method][context] = {}
if params not in entry['CAMAPScore'][method][context]:
entry['CAMAPScore'][method][context][params] = []
jobs.add((context, method, params))
if len(seqs) == chunk_size:
run_models() # will get jobs and seqs values from the outer scope
jobs = set()
seqs = {}
gc.collect()
#if counter >= 1000:
# break
if jobs:
run_models()
# Fill dictionnary
edited = False
for counter_ix in seq_scores:
pep, gene_name, entry_i = seq_ids[counter_ix]
for (m, c, p), scores in seq_scores[counter_ix].items():
pepdict[pep]['genes'][gene_name][entry_i]['CAMAPScore'][m][c][p] = scores
edited = True
if edited:
self._keep_a_copy(pfile)
pkl.dump(pepdict, open(pfile, 'wb'))
return counter
def merge_netmhc(self, overwrite=False):
self.overwrite = overwrite
self.columns_to_keep = ['Peptide', 'nM', 'Rank']
netmhc_folder = 'NetMHCpan-4.0a'
nmpan_out = os.path.join(self.out_dir, netmhc_folder)
nmpan_pred_dir = os.path.join(nmpan_out, 'predictions')
if not os.path.exists(nmpan_out):
if os.path.exists(nmpan_out + '.tar.gz'):
sys.stdout.write('Predictions folder already compressed. Uncompress if you want to rerun analysis.\n')
return
else:
sys.stderr.write('ERROR: Predictions folder not found.\n')
sys.exit(1)
# Prepare jobs
job_dct = defaultdict(lambda: [[], []])
for fn_allele in os.listdir(nmpan_pred_dir):
pinit, plen = fn_allele.split('.')[:2]
pfile = os.path.join(self.out_dir, 'peptides_%s%s.pkl' % (pinit, plen))
if fn_allele.split('.')[3] == 'tsv':
job_dct[pfile][0].append(os.path.join(nmpan_pred_dir, fn_allele))
elif fn_allele.split('.')[3] == 'NP':
job_dct[pfile][1].append(os.path.join(nmpan_pred_dir, fn_allele))
# Run jobs
with self.executor(max_workers=self.workers, use_threads=False) as ex:
allele_sets = ex.map(self._merge_netmhc, tuple(job_dct.keys()), *zip(*job_dct.values()))
exit_code = self._tar_compress(nmpan_out) # run while peptide files are being filled
# Update info file
alleles = set()
for al in allele_sets:
alleles.update(al)
pfile = os.path.join(self.out_dir, 'info.pkl')
info = pkl.load(open(pfile, 'rb'))
edited = False
if 'allelesNetMHC' not in info:
info['NetMHCVersion'] = set([netmhc_folder])
edited = True
if 'allelesNetMHC' not in info:
info['allelesNetMHC'] = alleles
edited = True
if edited:
info['date'] = datetime.now()
self._keep_a_copy(pfile)
pkl.dump(info, open(pfile, 'wb'))
# Delete NetMHC folder if compression was successful
if exit_code:
print('WARNING: tar compression failed for some reason')
else:
shutil.rmtree(nmpan_out)
def _merge_netmhc(self, pfile, netmhc_ba_out_list, netmhc_np_out_list):
print(pfile)
pepdict = pkl.load(open(pfile, 'rb'))
edited = False
alleles = set()
def concat(file_list):
dfs = []
for fn in file_list:
with open(fn, 'r') as f:
allele = f.readline().strip().replace(':', '')
temp_df = pd.read_csv(f, sep='\t', header=0, usecols=self.columns_to_keep, index_col=0)
temp_df.columns = pd.MultiIndex.from_product([[allele], temp_df.columns])
dfs.append(temp_df)
df = pd.concat(dfs, axis=1, sort=False, copy=False)
return df
ba_df = concat(netmhc_ba_out_list)
np_df = concat(netmhc_np_out_list)
np_df = np_df.drop('nM', axis=1, level=1)
np_df.columns = np_df.columns.set_levels(np_df.columns.levels[1] + '_NP', level=1, verify_integrity=False)
df_full = | pd.concat([ba_df, np_df], axis=1, sort=False, copy=False) | pandas.concat |
import pandas_ta as ta
import pandas as pd
import settings
from datetime import datetime as dt
def AddStrategy(df):
df = df.drop_duplicates(subset=['open_time'])
df['open_time'] = pd.to_datetime(df['open_time'], unit='s')
df.set_index( | pd.DatetimeIndex(df["open_time"]) | pandas.DatetimeIndex |
#---
# title: Zen Markdown Demo
# author: Dr. <NAME>
# date: CURRENT_DATE
# output:
# format: pdf
# latex:
# header: \usepackage{booktabs}
#---
## with class
# %%{run=true, echo=true}
class test:
def __init__(self):
print("hi")
def test(self):
print("hi")
t = test()
t.test()
#```
# %%{run=true, echo=true, render=false}
import numpy as np
import matplotlib.pyplot as plt
#```
## Lax example
# $$ \begin{pmatrix}
# x^2 + y^2 &= 1 \\
# y &= \sqrt{1 - x^2}
# \end{pmatrix}$$
# %%{run=true, echo=true, render=true}
import matplotlib.pyplot as plt
import numpy as np
xpoints = np.array([1, 8])
ypoints = np.array([3, 10])
print(xpoints)
print(ypoints)
#```
# Matplot lib example
# %%{run=true, echo=true, render=true}
plt.plot(xpoints, ypoints, 'o')
#```
# %%{run=true, echo=true, render=true}
import pandas as pd
d = {'col1': [1, 2], 'col2': [3, 4]}
df = pd.DataFrame(data=d)
df.to_markdown()
#
#```
# %%{run=true, echo=true, render=true}
import pandas as pd
d = {'col1': [1, 2], 'col2': [3, 4]}
df = pd.DataFrame(data=d)
df.to_markdown()
#```
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tests.core import TestBase
class Test(TestBase):
def setUp(self):
super().setUp()
self.ctx, self.executor = self._create_test_context()
def testDataFrameInitializer(self):
# from tensor
raw = np.random.rand(100, 10)
tensor = mt.tensor(raw, chunk_size=7)
r = md.DataFrame(tensor)
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_frame_equal(result, pd.DataFrame(raw))
r = md.DataFrame(tensor, chunk_size=13)
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_frame_equal(result, pd.DataFrame(raw))
# from Mars dataframe
raw = pd.DataFrame(np.random.rand(100, 10), columns=list('ABCDEFGHIJ'))
df = md.DataFrame(raw, chunk_size=15) * 2
r = md.DataFrame(df, num_partitions=11)
results = self.executor.execute_dataframe(r)
self.assertEqual(len(results), 10)
pd.testing.assert_frame_equal(pd.concat(results), raw * 2)
# from tileable dict
raw_dict = {
'C': np.random.choice(['u', 'v', 'w'], size=(100,)),
'A': pd.Series(np.random.rand(100)),
'B': np.random.randint(0, 10, size=(100,)),
}
m_dict = raw_dict.copy()
m_dict['A'] = md.Series(m_dict['A'])
m_dict['B'] = mt.tensor(m_dict['B'])
r = md.DataFrame(m_dict, columns=list('ABC'))
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_frame_equal(result, pd.DataFrame(raw_dict, columns=list('ABC')))
# from raw pandas initializer
raw = pd.DataFrame(np.random.rand(100, 10), columns=list('ABCDEFGHIJ'))
r = md.DataFrame(raw, num_partitions=10)
results = self.executor.execute_dataframe(r)
self.assertEqual(len(results), 10)
pd.testing.assert_frame_equal(pd.concat(results), raw)
def testSeriesInitializer(self):
# from tensor
raw = np.random.rand(100)
tensor = mt.tensor(raw, chunk_size=7)
r = md.Series(tensor)
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_series_equal(result, pd.Series(raw))
r = md.Series(tensor, chunk_size=13)
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_series_equal(result, pd.Series(raw))
# from index
raw = np.arange(100)
np.random.shuffle(raw)
raw = pd.Index(raw, name='idx_name')
idx = md.Index(raw, chunk_size=7)
r = md.Series(idx)
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_series_equal(result, pd.Series(raw))
# from Mars series
raw = pd.Series(np.random.rand(100), name='series_name')
ms = md.Series(raw, chunk_size=15) * 2
r = md.Series(ms, num_partitions=11)
results = self.executor.execute_dataframe(r)
self.assertEqual(len(results), 10)
pd.testing.assert_series_equal(pd.concat(results), raw * 2)
# from raw pandas initializer
raw = pd.Series(np.random.rand(100), name='series_name')
r = md.Series(raw, num_partitions=10)
results = self.executor.execute_dataframe(r)
self.assertEqual(len(results), 10)
pd.testing.assert_series_equal(pd.concat(results), raw)
def testIndexInitializer(self):
def _concat_idx(results):
s_results = [pd.Series(idx) for idx in results]
return pd.Index(pd.concat(s_results))
# from tensor
raw = np.arange(100)
np.random.shuffle(raw)
tensor = mt.tensor(raw)
r = md.Index(tensor, chunk_size=7)
result = self.executor.execute_dataframe(r, concat=True)[0]
pd.testing.assert_index_equal(result, | pd.Index(raw) | pandas.Index |
from datetime import datetime, timedelta
import json
import re
import arrow
import pandas as pd
import requests
from import_data.helpers import end_of_month
from import_data import googlefit_parse_utils
from import_data.garmin.parse import user_map_to_timeseries
WEEKS_BEFORE_SICK = 3
WEEKS_AFTER_SICK = 2
def oura_parser(oura_object, event_start, event_end=False):
if not event_end:
event_end = event_start
oura = json.loads(requests.get(oura_object["download_url"]).content)
start_date = arrow.get(event_start).floor("day")
end_date = arrow.get(event_end).ceil("day")
period_start = start_date.shift(weeks=WEEKS_BEFORE_SICK * -1)
period_end = end_date.shift(weeks=WEEKS_AFTER_SICK)
returned_hr_data = []
returned_temp_data = []
returned_respiratory_rate_data = []
# Keep only the Oura "sleep" summaries whose date falls inside the period_start..period_end window.
if "sleep" in oura.keys():
for entry in oura["sleep"]:
sdate = arrow.get(entry["summary_date"])
# Use this data if it falls within our target window.
if sdate >= period_start and sdate <= period_end:
record_time = arrow.get(entry["bedtime_start"])
temperature_delta = entry.get("temperature_delta", 0)
respiratory_rate = entry.get("breath_average", 0)
hr = entry.get("hr_lowest", 0)
returned_temp_data.append(
{
"timestamp": sdate.format("YYYY-MM-DD"),
"data": {
"temperature_delta": temperature_delta,
"respiratory_rate": respiratory_rate,
"heart_rate": hr
},
}
)
for hr in entry["hr_5min"]:
if int(hr) != 0:
returned_hr_data.append(
{
"timestamp": record_time.isoformat(),
"data": {"heart_rate": hr},
}
)
record_time = record_time.shift(minutes=+5)
return returned_hr_data, returned_temp_data
else:
return None, None
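# Note on the shared record format (added for clarity; values below are
# hypothetical): every parser in this module emits dicts shaped like
#   {"timestamp": "2020-03-01", "data": {"heart_rate": 52}}
# where "data" may also carry keys such as "temperature_delta" or
# "respiratory_rate", as in the Oura branch above.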
def fitbit_parser(fitbit_info, event_start, event_end=None):
if not event_end:
event_end = event_start
fitbit_data = json.loads(requests.get(fitbit_info["download_url"]).content)
start_date = arrow.get(event_start)
end_date = arrow.get(event_end)
period_start = start_date.shift(weeks=WEEKS_BEFORE_SICK * -1)
period_end = end_date.shift(weeks=WEEKS_AFTER_SICK)
returned_fitbit_data = []
for month in fitbit_data["heart"]:
for entry in fitbit_data["heart"][month]["activities-heart"]:
sdate = arrow.get(entry["dateTime"])
if sdate >= period_start and sdate <= period_end:
returned_fitbit_data.append(
{
"timestamp": entry["dateTime"],
"data": {
"heart_rate": entry["value"].get("restingHeartRate", "-")
},
}
)
return returned_fitbit_data
def apple_health_parser(apple_health_info, event_start, event_end=None):
if not event_end:
event_end = event_start
apple_health_data = requests.get(apple_health_info["download_url"]).text
apple_health_data = apple_health_data.split("\n")
start_date = arrow.get(event_start)
end_date = arrow.get(event_end)
period_start = start_date.shift(weeks=WEEKS_BEFORE_SICK * -1)
period_end = end_date.shift(weeks=WEEKS_AFTER_SICK)
returned_apple_data = []
for entry in apple_health_data:
if entry.endswith("R"):
entry = entry.split(",")
sdate = arrow.get(entry[1])
if sdate >= period_start and sdate <= period_end:
returned_apple_data.append(
{"timestamp": entry[1], "data": {"heart_rate": entry[0]}}
)
returned_apple_data.reverse() # invert list as CSV is newest to oldest
return returned_apple_data
def googlefit_to_qf(json_data, min_date, max_date):
res = []
data = json_data
df, _ = googlefit_parse_utils.get_dataframe_with_all(data)
if df is None:
return res
# we can have multiple sources of heart rate data. will get the one with the most data per day
ds_with_most_data = None
max_data_points = 0
for col_name in df.columns:
# the col_name should be of the format heart_rate.bpm.INT
# named this way by the parsing functionality in get_dataframe_with_all
if "heart_rate" not in col_name:
continue
data_points = len(df[col_name].dropna())
if data_points > max_data_points:
max_data_points = data_points
ds_with_most_data = col_name
if ds_with_most_data is None:
return None
series = df[ds_with_most_data].dropna()
for ts, value in zip(series.index, series.values):
ts = ts.to_pydatetime()
if ts < min_date or ts > max_date:
continue
rec = {"timestamp": ts.isoformat(), "data": {"heart_rate": value}}
res.append(rec)
return res
def googlefit_parser(googlefit_files_info, event_start, event_end=None):
print(event_start)
if event_end is None:
event_end = event_start
event_start = datetime(
event_start.year, event_start.month, event_start.day, 0, 0, 0
)
event_end = datetime(event_end.year, event_end.month, event_end.day, 23, 59, 59)
min_date = event_start - timedelta(days=21)
max_date = event_end + timedelta(days=14)
returned_googlefit_data = []
for info in googlefit_files_info:
basename = info["basename"]
# googlefit_2018-12.json
start_month = datetime.strptime(basename, "googlefit_%Y-%m.json")
end_month = end_of_month(start_month)
# file doesn't contain relevant data for our range, skip
if min_date > end_month:
continue
if max_date < start_month:
continue
print("looking at {}".format(basename))
googlefit_json = json.loads(requests.get(info["download_url"]).content)
data_in_qf_format = googlefit_to_qf(googlefit_json, min_date, max_date)
if data_in_qf_format:
returned_googlefit_data += data_in_qf_format
return returned_googlefit_data
def garmin_parser(garmin_file_info, event_start, event_end=None):
print(event_start)
if event_end is None:
event_end = event_start
event_start = datetime(
event_start.year, event_start.month, event_start.day, 0, 0, 0
)
event_end = datetime(event_end.year, event_end.month, event_end.day, 23, 59, 59)
min_date = event_start - timedelta(days=21)
max_date = event_end + timedelta(days=14)
print(garmin_file_info)
garmin_json = json.loads(requests.get(garmin_file_info["download_url"]).content)
data_in_qf_format = garmin_to_qf(garmin_json, min_date, max_date)
return data_in_qf_format
def garmin_to_qf(json_data, min_date, max_date):
res = []
data = json_data
series = user_map_to_timeseries(data)
for dt, value in zip(series.index, series.values):
if dt < min_date or dt > max_date:
continue
rec = {"timestamp": dt.isoformat(), "data": {"heart_rate": int(value)}}
res.append(rec)
return res
def fitbit_intraday_parser(
fitbit_data, fitbit_intraday_files, event_start, event_end=None
):
"""
Return intraday heartrate data for sleep periods in target period.
"""
if not event_end:
event_end = event_start
fitbit_data = requests.get(fitbit_data["download_url"]).json()
start_date = arrow.get(event_start)
end_date = arrow.get(event_end)
period_start = start_date.shift(weeks=WEEKS_BEFORE_SICK * -1)
period_end = end_date.shift(weeks=WEEKS_AFTER_SICK)
fitbit_sleep_data = {"start_times": [], "minutes": [], "periods": {}}
returned_fitbit_data = {}
# Calculate and store sleep periods in target time period.
for year in fitbit_data["sleep-start-time"]:
for entry in fitbit_data["sleep-start-time"][year]["sleep-startTime"]:
sdate = arrow.get(entry["dateTime"])
if sdate >= period_start and sdate <= period_end:
fitbit_sleep_data["start_times"].append(entry)
for year in fitbit_data["sleep-minutes"]:
for entry in fitbit_data["sleep-minutes"][year]["sleep-minutesAsleep"]:
sdate = arrow.get(entry["dateTime"])
if sdate >= period_start and sdate <= period_end:
fitbit_sleep_data["minutes"].append(entry)
for entry in fitbit_sleep_data["start_times"]:
if entry.get("value", None):
fitbit_sleep_data["periods"][entry["dateTime"]] = {
"start": arrow.get(entry["dateTime"] + " " + entry["value"])
}
for entry in fitbit_sleep_data["minutes"]:
sleep_entry = fitbit_sleep_data["periods"].get(entry["dateTime"], None)
if sleep_entry:
sleep_entry["end"] = sleep_entry["start"].shift(minutes=int(entry["value"]))
# Get all potentially needed heartrate data.
hr_data = []
for file_info in fitbit_intraday_files:
try:
yearmonth = re.search(
"fitbit-intraday-(.*?)\.json", file_info["basename"]
).groups()[0]
yearmonth = arrow.get(yearmonth)
if yearmonth.floor(
"month"
) <= period_end and period_start <= yearmonth.ceil("month"):
data = json.loads(requests.get(file_info["download_url"]).content)
hr_data = hr_data + data["activities-heart-intraday"]
except AttributeError:
continue
# load into dataframe
rows = []
for hr_point in hr_data:
hr_date = hr_point["date"]
for hr_datapoint in hr_point["dataset"]:
rows.append((hr_date + " " + hr_datapoint["time"], hr_datapoint["value"]))
hr_dataframe = | pd.DataFrame(rows, columns=["timestamp", "heart_rate"]) | pandas.DataFrame |
import streamlit as st
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
st.write("""
# Penguin Prediction App
This app predicts the **Palmer Penguins** species!
""")
st.sidebar.header("User Input Features")
st.sidebar.markdown("""
[Example CSV input File](https://raw.githubusercontent.com/dataprofessor/data/master/penguins_example.csv)
""")
upload_file = st.sidebar.file_uploader("Upload your input csv file", type = ['csv'])
if upload_file is not None:
input_df = | pd.read_csv(upload_file) | pandas.read_csv |
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from thoipapy.utils import convert_truelike_to_bool, convert_falselike_to_bool
import thoipapy
def fig_plot_BOcurve_mult_train_datasets(s):
"""Plot the BO-curve for multiple training datasets.
Takes the datasets listed in settings under "train_datasets" and "test_datasets"
and plots the BO-curve of each combination in a single figure.
The Area Under the BO Curve for a sample size of 0 to 10 (AUBOC) is shown in the legend.
Currently plots both the new and old performance method.
NEW METHOD
----------
Performance = overlap between experiment and predicted MINUS the overlap expected in random selections
OLD METHOD
----------
Performance = overlap between experiment and predicted DIVIDED BY the overlap expected in random selections
Parameters
----------
s : dict
Settings dictionary for figures.
"""
# plt.rcParams.update({'font.size': 7})
test_set_list, train_set_list = thoipapy.utils.get_test_and_train_set_lists(s)
test_dataset_str = "-".join([str(n) for n in test_set_list])
train_dataset_str = "-".join([str(n) for n in train_set_list])
mult_testname = "testsets({})_trainsets({})".format(test_dataset_str, train_dataset_str)
sys.stdout.write(mult_testname)
mult_THOIPA_dir = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "summaries", mult_testname)
thoipapy.utils.make_sure_path_exists(mult_THOIPA_dir)
plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname)
plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname, sheet_name="df_o_over_r", suffix="_BO_curve_old_method")
def plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname, sheet_name="df_o_minus_r", suffix="_BO_curve"):
""" Separate function allowing a toggle of the OLD or NEW performance methods
Parameters
----------
s : dict
Settings dictionary for figures.
train_set_list : list
List of training datasets in selection
E.g. ["set02", "set04"]
test_set_list : list
List of test datasets in selection
E.g. ["set03", "set31"]
mult_THOIPA_dir : str
Path to folder containing results for multiple THOIPA comparisons.
mult_testname : str
String denoting this combination of test and training datasets
E.g. testsets(2)_trainsets(2)
sheet_name : str
Excel sheet_name
This is the toggle deciding whether the OLD or NEW performance measure is used
Default = new method ("df_o_minus_r"), where the overlap MINUS random_overlap is used
suffix : str
Suffix for figure
E.g. "" or "_old_method_o_over_r"
"""
BO_curve_png = os.path.join(mult_THOIPA_dir, "{}{}.png".format(mult_testname, suffix))
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
for train_set in train_set_list:
trainsetname = "set{:02d}".format(int(train_set))
for test_set in test_set_list:
testsetname = "set{:02d}".format(int(test_set))
# /media/mark/sindy/m_data/THOIPA_data/results/Bo_Curve/Testset03_Trainset01.THOIPA.validation/bocurve_data.xlsx
bocurve_data_xlsx = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "bocurve_data.xlsx")
df = pd.read_excel(bocurve_data_xlsx, sheet_name=sheet_name, index_col=0)
df["mean_"] = df.mean(axis=1)
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df["mean_"].iloc[:s["n_residues_AUBOC_validation"]]
# use the composite trapezoidal rule to get the area under the curve
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.trapz.html
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
df["mean_"].plot(ax=ax, label="Test{}_Train{}(AUBOC={:0.1f})".format(testsetname, trainsetname, AUBOC))
ax.set_xlabel("sample size")
ax.set_ylabel("performance\n(observed overlap - random overlap)")
ax.set_xticks(range(1, df.shape[0] + 1))
ax.set_xticklabels(df.index)
ax.legend()
fig.tight_layout()
fig.savefig(BO_curve_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(BO_curve_png))
sys.stdout.write("\nfig_plot_BO_curve_mult_train_datasets finished ({})".format(BO_curve_png))
def compare_selected_predictors(s, logging):
"""Plot the BO-curve for multiple prediction methods
Takes the datasets listed in settings under the "selected_predictors" tab
(e.g. ["Testset03_Trainset04.THOIPA","Testset03.LIPS"])
and plots the BO-curves in a single figure.
The Area Under the BO Curve for a sample size of 0 to 10 (AUBOC) is shown in the legend.
Currently plots both the new and old performance method.
Performance is measured with the NEW METHOD:
Performance = overlap between experiment and predicted MINUS the overlap expected in random selections
Parameters
----------
s : dict
Settings dictionary for figures.
"""
# if s["set_number"] != s["test_datasets"]:
# raise Exception("set_number and test_datasets are not identical in settings file. This is recommended for test/train validation.")
# plt.rcParams.update({'font.size': 7})
logging.info("\n--------------- starting compare_selected_predictors ---------------\n")
BO_curve_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_BO_curve.png"
AUBOC_bar_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_AUBOC_barchart.png"
ROC_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_ROC.png"
thoipapy.utils.make_sure_path_exists(BO_curve_png, isfile=True)
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
predictors_df = pd.read_excel(s["settings_path"], sheet_name="selected_predictors")
predictors_df["include"] = predictors_df["include"].apply(convert_truelike_to_bool, convert_nontrue=False)
predictors_df["include"] = predictors_df["include"].apply(convert_falselike_to_bool)
predictors_df = predictors_df.loc[predictors_df.include == True]
predictor_list = predictors_df.predictor.tolist()
area_under_curve_dict = {}
# create an empty dataframe to keep the pycharm IDE happy
df = pd.DataFrame()
for predictor_name in predictor_list:
bocurve_data_xlsx: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/data/{s['setname']}_thoipa_loo_bo_curve_data.xlsx"
if not os.path.isfile(bocurve_data_xlsx):
raise FileNotFoundError("bocurve_data_xlsx does not exist ({}). Try running run_testset_trainset_validation".format(bocurve_data_xlsx))
df = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
df["mean_"] = df.mean(axis=1)
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df["mean_"].iloc[:s["n_residues_AUBOC_validation"]]
# use the composite trapezoidal rule to get the area under the curve
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.trapz.html
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
area_under_curve_dict[predictor_name] = AUBOC
df["mean_"].plot(ax=ax, label="{}(AUBOC={:0.1f})".format(predictor_name, AUBOC))
ax.set_xlabel("sample size")
ax.set_ylabel("performance\n(observed overlap - random overlap)")
ax.set_xticks(range(1, df.shape[0] + 1))
ax.set_xticklabels(df.index)
ax.legend()
fig.tight_layout()
fig.savefig(BO_curve_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(BO_curve_png))
plt.close("all")
AUBOC_ser = pd.Series(area_under_curve_dict).sort_index()
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
AUBOC_ser.plot(ax=ax, kind="bar")
ax.set_ylabel("performance (AUBOC)")
fig.tight_layout()
fig.savefig(AUBOC_bar_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(AUBOC_bar_png))
plt.close("all")
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
for predictor_name in predictor_list:
# "D:\data_thoipapy\results\compare_testset_trainset\data\Testset03_Trainset04.THOIPA\Testset03_Trainset04.THOIPA.ROC_data.pkl"
# ROC_pkl = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", predictor_name, "data", "{}.ROC_data.pkl".format(predictor_name))
testsetname = "set{:02d}".format(int(s['test_datasets']))
ROC_pkl = Path(s["data_dir"]) / "results" / testsetname / f"blindvalidation/{predictor_name}/ROC_data.pkl"
if os.path.isfile(ROC_pkl):
with open(ROC_pkl, "rb") as f:
ROC_out_dict = pickle.load(f)
ax.plot(ROC_out_dict["false_positive_rate_mean"], ROC_out_dict["true_positive_rate_mean"], label='{} ({:0.2f})'.format(predictor_name, ROC_out_dict["mean_roc_auc"]), lw=1.5)
else:
sys.stdout.write("\nPICKLE WITH ROC DATA NOT FOUND : {}".format(ROC_pkl))
continue
ax.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='random')
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel("False positive rate")
ax.set_ylabel("True positive rate")
ax.legend(loc="lower right")
fig.tight_layout()
fig.savefig(ROC_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(ROC_png))
sys.stdout.write("\nBO_curve_png ({})\n".format(BO_curve_png))
logging.info("\n--------------- finished compare_selected_predictors ---------------\n")
def combine_BOcurve_files_hardlinked(s):
Train04_Test01_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset04_Testset01.bocurve.csv"
df41 = pd.read_csv(Train04_Test01_BoCurve_file, index_col=0)
df41_ratio = df41[df41.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df41[df41.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df41_ratio_df = df41_ratio.to_frame(name="Tr4Te1Ratio")
df41_LIPS_ratio = df41[df41.parameters == "LIPS_observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df41[df41.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df41_LIPS_ratio_df = df41_LIPS_ratio.to_frame(name="Tr4Te1LIPSRatio")
Train04_Test02_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset04_Testset02.bocurve.csv"
df42 = pd.read_csv(Train04_Test02_BoCurve_file, index_col=0)
df42_ratio = df42[df42.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df42[df42.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df42_ratio_df = df42_ratio.to_frame(name="Tra4Tes2Ratio")
df42_LIPS_ratio = df42[df42.parameters == "LIPS_observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df42[df42.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df42_LIPS_ratio_df = df42_LIPS_ratio.to_frame(name="Tr4Te2LIPSRatio")
Train04_Test03_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset04_Testset03.bocurve.csv"
df43 = pd.read_csv(Train04_Test03_BoCurve_file, index_col=0)
df43_ratio = df43[df43.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df43[df43.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df43_ratio_df = df43_ratio.to_frame(name="Tra4Tes3Ratio")
df43_LIPS_ratio = df43[df43.parameters == "LIPS_observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df43[df43.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df43_LIPS_ratio_df = df43_LIPS_ratio.to_frame(name="Tr4Te3LIPSRatio")
Train02_Test01_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset02_Testset01.bocurve.csv"
df21 = pd.read_csv(Train02_Test01_BoCurve_file, index_col=0)
df21_ratio = df21[df21.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df21[df21.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df21_ratio_df = df21_ratio.to_frame(name="Tra2Te1Ratio")
Train02_Test02_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset02_Testset02.bocurve.csv"
df22 = pd.read_csv(Train02_Test02_BoCurve_file, index_col=0)
df22_ratio = df22[df22.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df22[df22.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df22_ratio_df = df22_ratio.to_frame(name="Tra2Tes2Ratio")
Train02_Test03_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset02_Testset03.bocurve.csv"
df23 = | pd.read_csv(Train02_Test03_BoCurve_file, index_col=0) | pandas.read_csv |
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
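# For example (follows from the rules above): openpyxl is rejected for ".xls",
# odf is only accepted together with ".ods", and ".ods" is only read by odf.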
def _transfer_marks(engine, read_ext):
"""
engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
# TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index= | MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]) | pandas.MultiIndex |
import argparse
from tqdm import trange
import requests
import os
import sys
import csv
import pandas as pd
from time import sleep
from datetime import datetime
# URLs to make api calls
BASE_URL = "https://metamon-api.radiocaca.com/usm-api"
TOKEN_URL = f"{BASE_URL}/login"
LIST_MONSTER_URL = f"{BASE_URL}/getWalletPropertyBySymbol"
CHANGE_FIGHTER_URL = f"{BASE_URL}/isFightMonster"
START_FIGHT_URL = f"{BASE_URL}/startBattle"
LIST_BATTLER_URL = f"{BASE_URL}/getBattelObjects"
WALLET_PROPERTY_LIST = f"{BASE_URL}/getWalletPropertyList"
LVL_UP_URL = f"{BASE_URL}/updateMonster"
MINT_EGG_URL = f"{BASE_URL}/composeMonsterEgg"
CHECK_BAG_URL = f"{BASE_URL}/checkBag"
def datetime_now():
return datetime.now().strftime("%m/%d/%Y %H:%M:%S")
def post_formdata(payload, url="", headers=None):
"""Method to send request to game"""
files = []
if headers is None:
headers = {}
for _ in range(5):
try:
# Add delay to avoid error from too many requests per second
sleep(1.1)
response = requests.request("POST",
url,
headers=headers,
data=payload,
files=files)
return response.json()
except:
continue
return {}
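# Hedged usage sketch (payload keys are illustrative, not exhaustive):
#   bag = post_formdata({"address": address}, CHECK_BAG_URL,
#                       {"accessToken": token})
# returns the decoded JSON dict on success, or {} after five failed attempts.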
def get_battler_score(monster):
""" Get opponent's power score"""
return monster["sca"]
def picker_battler(monsters_list):
""" Picking opponent """
battlers = list(filter(lambda m: m["rarity"] == "N", monsters_list))
if len(battlers) == 0:
battlers = list(filter(lambda m: m["rarity"] == "R", monsters_list))
battler = battlers[0]
score_min = get_battler_score(battler)
for i in range(1, len(battlers)):
score = get_battler_score(battlers[i])
if score < score_min:
battler = battlers[i]
score_min = score
return battler
def pick_battle_level(level=1):
# pick highest league for given level
if 21 <= level <= 40:
return 2
if 41 <= level <= 60:
return 3
return 1
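# Mapping implied by the ranges above: levels 21-40 play league 2,
# levels 41-60 play league 3, and everything else (including levels 1-20)
# falls back to league 1, e.g. pick_battle_level(35) == 2.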
class MetamonPlayer:
def __init__(self,
address,
sign,
msg="LogIn",
auto_lvl_up=False,
output_stats=False):
self.no_enough_money = False
self.output_stats = output_stats
self.total_bp_num = 0
self.total_success = 0
self.total_fail = 0
self.mtm_stats_df = []
self.token = None
self.address = address
self.sign = sign
self.msg = msg
self.auto_lvl_up = auto_lvl_up
def init_token(self):
"""Obtain token for game session to perform battles and other actions"""
payload = {"address": self.address, "sign": self.sign, "msg": self.msg,
"network": "1", "clientType": "MetaMask"}
response = post_formdata(payload, TOKEN_URL)
if response.get("code") != "SUCCESS":
sys.stderr.write("Login failed, token is not initialized. Terminating\n")
sys.exit(-1)
self.token = response.get("data").get("accessToken")
def change_fighter(self, monster_id):
"""Switch to next metamon if you have few"""
payload = {
"metamonId": monster_id,
"address": self.address,
}
post_formdata(payload, CHANGE_FIGHTER_URL)
def list_battlers(self, monster_id, front=1):
"""Obtain list of opponents"""
payload = {
"address": self.address,
"metamonId": monster_id,
"front": front,
}
headers = {
"accessToken": self.token,
}
response = post_formdata(payload, LIST_BATTLER_URL, headers)
return response.get("data", {}).get("objects")
def start_fight(self,
my_monster,
target_monster_id,
loop_count=1):
""" Main method to initiate battles (as many as monster has energy for)"""
success = 0
fail = 0
total_bp_fragment_num = 0
mtm_stats = []
my_monster_id = my_monster.get("id")
my_monster_token_id = my_monster.get("tokenId")
my_level = my_monster.get("level")
my_power = my_monster.get("sca")
battle_level = pick_battle_level(my_level)
tbar = trange(loop_count)
tbar.set_description(f"Fighting with {my_monster_token_id}...")
for _ in tbar:
payload = {
"monsterA": my_monster_id,
"monsterB": target_monster_id,
"address": self.address,
"battleLevel": battle_level,
}
headers = {
"accessToken": self.token,
}
response = post_formdata(payload, START_FIGHT_URL, headers)
code = response.get("code")
if code == "BATTLE_NOPAY":
self.no_enough_money = True
break
data = response.get("data", {})
if data is None:
print(f"Metamon {my_monster_id} cannot fight skipping...")
break
fight_result = data.get("challengeResult", False)
bp_fragment_num = data.get("bpFragmentNum", 10)
if self.auto_lvl_up:
# Try to lvl up
res = post_formdata({"nftId": my_monster_id, "address": self.address},
LVL_UP_URL,
headers)
code = res.get("code")
if code == "SUCCESS":
tbar.set_description(f"LVL UP successful! Continue fighting with {my_monster_token_id}...")
my_level += 1
# Update league level if new level is 21 or 41
battle_level = pick_battle_level(my_level)
self.total_bp_num += bp_fragment_num
total_bp_fragment_num += bp_fragment_num
if fight_result:
success += 1
self.total_success += 1
else:
fail += 1
self.total_fail += 1
mtm_stats.append({
"My metamon id": my_monster_token_id,
"League lvl": battle_level,
"Total battles": loop_count,
"My metamon power": my_power,
"My metamon level": my_level,
"Victories": success,
"Defeats": fail,
"Total egg shards": total_bp_fragment_num,
"Timestamp": datetime_now()
})
mtm_stats_df = pd.DataFrame(mtm_stats)
print(mtm_stats_df)
self.mtm_stats_df.append(mtm_stats_df)
def get_wallet_properties(self):
""" Obtain list of metamons on the wallet"""
data = []
payload = {"address": self.address}
headers = {
"accesstoken": self.token,
}
response = post_formdata(payload, WALLET_PROPERTY_LIST, headers)
mtms = response.get("data", {}).get("metamonList", [])
if len(mtms) > 0:
data.extend(mtms)
data = sorted(data, key=lambda metamon: metamon['id'])
else:
if 'code' in response and response['code'] == 'FAIL':
print(response['message'])
return data
def list_monsters(self):
""" Obtain list of metamons on the wallet (deprecated)"""
payload = {"address": self.address, "page": 1, "pageSize": 60, "payType": -6}
headers = {"accessToken": self.token}
response = post_formdata(payload, LIST_MONSTER_URL, headers)
monsters = response.get("data", {}).get("data", {})
return monsters
def battle(self, w_name=None):
""" Main method to run all battles for the day"""
if w_name is None:
w_name = self.address
summary_file_name = f"{w_name}_summary.tsv"
mtm_stats_file_name = f"{w_name}_stats.tsv"
self.init_token()
wallet_monsters = self.get_wallet_properties()
print(f"Monsters total: {len(wallet_monsters)}")
available_monsters = [
monster for monster in wallet_monsters if (monster.get("level") < 60 or monster.get("allowReset") == True) and monster.get("tear") > 0
]
level60_monsters = [
monster for monster in wallet_monsters if monster.get("level") >= 60
]
stats_l = []
print(f"Available Monsters : {len(available_monsters)}")
print(f"Level 60 Monsters : {len(level60_monsters)}")
for monster in available_monsters:
monster_id = monster.get("id")
tear = monster.get("tear")
level = monster.get("level")
exp = monster.get("exp")
if int(exp) >= 600 or (int(level) >= 60 and int(exp) >= 395):
print(f"Monster {monster_id} cannot fight due to "
f"exp overflow. Skipping...")
continue
battlers = self.list_battlers(monster_id)
battler = picker_battler(battlers)
target_monster_id = battler.get("id")
self.change_fighter(monster_id)
self.start_fight(monster,
target_monster_id,
loop_count=tear)
if self.no_enough_money:
print("Not enough u-RACA")
break
total_count = self.total_success + self.total_fail
success_percent = .0
if total_count > 0:
success_percent = (self.total_success / total_count) * 100
if total_count <= 0:
print("No battles to record")
return
stats_l.append({
"Victories": self.total_success,
"Defeats": self.total_fail,
"Win Rate": f"{success_percent:.2f}%",
"Total Egg Shards": self.total_bp_num,
"Datetime": datetime_now()
})
stats_df = pd.DataFrame(stats_l)
print(stats_df)
if os.path.exists(summary_file_name) and self.output_stats:
back_fn = f"{summary_file_name}.bak"
os.rename(summary_file_name, back_fn)
tmp_df = | pd.read_csv(back_fn, sep="\t", dtype="str") | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from pandas.util import hash_pandas_object
import dask.dataframe as dd
from dask.dataframe import _compat
from dask.dataframe._compat import tm
from dask.dataframe.utils import assert_eq
@pytest.mark.parametrize(
"obj",
[
pd.Series([1, 2, 3]),
pd.Series([1.0, 1.5, 3.2]),
pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
pd.Series(["a", "b", "c"]),
pd.Series([True, False, True]),
pd.Index([1, 2, 3]),
pd.Index([True, False, True]),
pd.DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
_compat.makeMissingDataframe(),
_compat.makeMixedDataFrame(),
_compat.makeTimeDataFrame(),
_compat.makeTimeSeries(),
_compat.makeTimedeltaIndex(),
],
)
def test_hash_pandas_object(obj):
a = hash_pandas_object(obj)
b = hash_pandas_object(obj)
if isinstance(a, np.ndarray):
np.testing.assert_equal(a, b)
else:
assert_eq(a, b)
def test_categorical_consistency():
# Check that categoricals hash consistent with their values, not codes
# This should work for categoricals of any dtype
for s1 in [
pd.Series(["a", "b", "c", "d"]),
pd.Series([1000, 2000, 3000, 4000]),
pd.Series(pd.date_range(0, periods=4)),
]:
s2 = s1.astype("category").cat.set_categories(s1)
s3 = s2.cat.set_categories(list(reversed(s1)))
for categorize in [True, False]:
# These should all hash identically
h1 = hash_pandas_object(s1, categorize=categorize)
h2 = hash_pandas_object(s2, categorize=categorize)
h3 = | hash_pandas_object(s3, categorize=categorize) | pandas.util.hash_pandas_object |
# Copyright (c) 2020 Civic Knowledge. This file is licensed under the terms of the
# MIT license included in this distribution as LICENSE
import logging
import re
from collections import defaultdict, deque
from pathlib import Path
from time import time
import pandas as pd
from synpums.util import *
_logger = logging.getLogger(__name__)
def sample_to_sum(N, df, col, weights):
"""Sample a number of records from a dataset, then return the smallest set of
rows at the front of the dataset where the weight sums to more than N"""
t = df.sample(n=N, weights=weights, replace=True)
# Get the number of records that sum to N.
arg = t[col].cumsum().sub(N).abs().astype(int).argmin()
return t.iloc[:arg + 1]
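# Hypothetical usage sketch (names and numbers are illustrative only):
#   hh = pd.DataFrame({"WGTP": [30, 20, 10]})
#   picked = sample_to_sum(50, hh, "WGTP", hh.WGTP)
# `picked` holds the leading sampled rows whose cumulative WGTP is closest to 50.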
def rms(s):
"""Root mean square"""
return np.sqrt(np.sum(np.square(s)))
def vector_walk_callback(puma_task, tract_task, data, memo):
pass
def make_acs_target_df(acs, columns, geoid):
t = acs.loc[geoid]
target_map = {c + '_m90': c for c in columns if "WGTP" not in columns}
target_df = pd.DataFrame({
'est': t[target_map.values()],
'm90': t[target_map.keys()].rename(target_map)
})
target_df['est_min'] = target_df.est - target_df.m90
target_df['est_max'] = target_df.est + target_df.m90
target_df.loc[target_df.est_min < 0, 'est_min'] = 0
return target_df.astype('Int64')
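# Shape of the returned frame (descriptive note): one row per target column,
# with the ACS estimate ('est'), its 90% margin of error ('m90'), and the
# implied bounds est_min = max(est - m90, 0) and est_max = est + m90,
# all cast to the nullable Int64 dtype.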
def geoid_path(geoid):
from pathlib import Path
from geoid.acs import AcsGeoid
go = AcsGeoid.parse(geoid)
try:
return Path(f"{go.level}/{go.stusab}/{go.county:03d}/{str(go)}.csv")
except AttributeError:
return Path(f"{go.level}/{go.stusab}/{str(go)}.csv")
class AllocationTask(object):
"""Represents the allocation process to one tract"""
def __init__(self, region_geoid, puma_geoid, acs_ref, hh_ref, cache_dir):
self.region_geoid = region_geoid
self.puma_geoid = puma_geoid
self.acs_ref = acs_ref
self.hh_ref = hh_ref
self.cache_dir = cache_dir
self.sample_pop = None
self.sample_weights = None
self.unallocated_weights = None # Initialized to the puma weights, gets decremented
self.target_marginals = None
self.allocated_weights = None
self.household_count = None
self.population_count = None
self.gq_count = None
self.gq_cols = None
self.sex_age_cols = None
self.hh_size_cols = None
self.hh_race_type_cols = None
self.hh_eth_type_cols = None
self.hh_income_cols = None
self._init = False
self.running_allocated_marginals = None
# A version of the sample_pop constructed by map_cp, added as an instance var so
# the probabilities can be manipulated during the vector walk.
self.cp_df = None
self.cp_prob = None
@property
def row(self):
from geoid.acs import AcsGeoid
tract = AcsGeoid.parse(self.region_geoid)
return [tract.state, tract.stusab, tract.county, self.region_geoid, self.puma_geoid, str(self.acs_ref),
str(self.hh_ref)]
def init(self, use_sample_weights=False, puma_weights=None):
"""Load all of the data, just before running the allocation"""
if isinstance(self.hh_ref, pd.DataFrame):
hh_source = self.hh_ref
else:
hh_source = pd.read_csv(self.hh_ref, index_col='SERIALNO', low_memory=False) \
.drop(columns=['geoid'], errors='ignore').astype('Int64')
if isinstance(self.acs_ref, pd.DataFrame):
acs = self.acs_ref
else:
acs = pd.read_csv(self.acs_ref, index_col='geoid', low_memory=False)
# These are only for debugging.
#self.hh_source = hh_source
#self.tract_acs = acs
return self._do_init(hh_source, acs, puma_weights=puma_weights)
def _do_init(self, hh_source, acs, puma_weights=None):
self.serialno = hh_source.index
# Col 0 is the WGTP column
w_cols = [c for c in hh_source.columns if "WGTP" in c]
not_w_cols = [c for c in hh_source.columns if "WGTP" not in c]
# Not actually a sample pop --- populations are supposed to be unweighted
self.sample_pop = hh_source[['WGTP'] + not_w_cols].iloc[:, 1:].reset_index(drop=True).astype(int)
# Shouldn't this be:
# self.sample_pop = hh_source[not_w_cols].reset_index(drop=True).astype(int)
self.sample_weights = hh_source.iloc[:, 0].reset_index(drop=True).astype(int)
assert self.sample_pop.shape[0] == self.sample_weights.shape[0]
not_w_cols = [c for c in hh_source.columns if "WGTP" not in c]
self.target_marginals = make_acs_target_df(acs, not_w_cols, self.region_geoid)
self.household_count = acs.loc[self.region_geoid].b11016_001
self.population_count = acs.loc[self.region_geoid].b01003_001
self.gq_count = acs.loc[self.region_geoid].b26001_001
self.total_count = self.household_count + self.gq_count
self.allocated_weights = np.zeros(len(self.sample_pop))
self.unallocated_weights = puma_weights if puma_weights is not None else self.sample_weights.copy()
self.running_allocated_marginals = pd.Series(0, index=self.target_marginals.index)
# Sample pop, normalized to unit length to speed up cosine similarity
self.sample_pop_norm = vectors_normalize(self.sample_pop.values)
# Column sets
self.gq_cols = ['b26001_001']
self.sex_age_cols = [c for c in hh_source.columns if c.startswith('b01001')]
self.hh_size_cols = [c for c in hh_source.columns if c.startswith('b11016')]
p = re.compile(r'b11001[^hi]_')
self.hh_race_type_cols = [c for c in hh_source.columns if p.match(c)]
p = re.compile(r'b11001[hi]_')
self.hh_eth_type_cols = [c for c in hh_source.columns if p.match(c)]
p = re.compile(r'b19025')
self.hh_income_cols = [c for c in hh_source.columns if p.match(c)]
# We will use this identity in the numpy version of step_schedule
# assert all((self.cp.index / 2).astype(int) == self['index'])
self.rng = np.random.default_rng()
self.make_cp(self.sample_pop)
self._init = True
return acs
def make_cp(self, sp):
"""Make a version of the sample population with two records for each
row, one the negative of the one before it. This is used to generate
rows that can be used in the vector walk."""
self.cp = pd.concat([sp, sp]).sort_index().reset_index()
self.cp.insert(1, 'sign', 1)
self.cp.insert(2, 'select_weight', 0)
self.cp.iloc[0::2, 1:] = self.cp.iloc[0::2, 1:] * -1 # flip sign on the marginal counts
self.update_cp()
return self.cp
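# Illustrative sketch of the doubled frame built above: a sample row with
# marginal counts [2, 1, 0] appears twice, once negated (sign -1, later
# weighted by its allocated count, i.e. a candidate for removal) and once
# unchanged (sign +1, weighted by its unallocated count, i.e. a candidate
# for addition), which is what lets the vector walk step in both directions.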
def update_cp(self):
self.cp.loc[0::2, 'select_weight'] = self.allocated_weights.tolist()
self.cp.loc[1::2, 'select_weight'] = self.unallocated_weights.tolist()
def set_cp_prob(self, cp_prob):
pass
@property
def path(self):
return Path(self.cache_dir).joinpath(geoid_path(str(self.region_geoid))).resolve()
@property
def pums(self):
"""Return the PUMS household and personal records for this PUMA"""
from .pums import build_pums_dfp_dfh
from geoid.acs import Puma
puma = Puma.parse(self.puma_geoid)
dfp, dfh = build_pums_dfp_dfh(puma.stusab, year=2018, release=5)
return dfp, dfh
def get_saved_frame(self):
if self.path.exists():
return pd.read_csv(self.path.resolve(), low_memory=False)
else:
return None
@property
def results_frame(self):
return pd.DataFrame({
'geoid': self.region_geoid,
'serialno': self.serialno,
'weight': self.allocated_weights
})
def save_frame(self):
self.path.parent.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame({
'serialno': self.serialno,
'weight': self.allocated_weights
})
df = df[df.weight > 0]
df.to_csv(self.path, index=False)
def load_frame(self):
df = | pd.read_csv(self.path, low_memory=False) | pandas.read_csv |
# License: BSD_3_clause
#
# Copyright (c) 2015, <NAME>, <NAME>, <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the Technical University of Denmark (DTU)
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import pandas as pd
import numpy as np
from datetime import datetime, time
import gc
import math
import random
class expando:
pass
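# `expando` is simply an empty attribute container used throughout this module;
# hedged usage sketch:
#   e = expando(); e.renew_type = 'wind'
# it plays the same role as types.SimpleNamespace in newer code.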
#Function needed to define distances between nodes from longitudes and latitudes
def distance_from_long_lat(lat1, long1, lat2, long2):
# Convert latitude and longitude to spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
if cos>1:#numerical approximations can bring to a number slightly >1
cos=1
arc = math.acos( cos )
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
R_earth = 6371 #km
arc = arc * R_earth
return arc
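# Sanity-check example (follows from the spherical law of cosines above):
# two points on the equator separated by 90 degrees of longitude give
# arc = acos(0) * 6371 km, roughly 10007 km (a quarter of Earth's
# circumference), e.g. distance_from_long_lat(0, 0, 0, 90).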
class dataReader:
def __init__(self, countries,max_number_loc,renewable_type,data_type,start_time,
end_time,fore_start_time,fore_end_time,nbr_leadTimes,folder_location):
self._set_attributes(countries,max_number_loc,renewable_type,data_type,start_time,
end_time,fore_start_time,fore_end_time,nbr_leadTimes,folder_location)
self._check_countries()
self._load_observations()
self._tod_observations()
self._load_forecasts()
self._get_distances()
print('Data has been imported!')
pass
#Function that stores all inputs as attributes of the output
def _set_attributes(self, countries,max_number_loc,renewable_type,data_type,start_time,
end_time,fore_start_time,fore_end_time,nbr_leadTimes,folder_location):
self.attributes = expando()
self.attributes.renew_type = renewable_type
self.attributes.data_type = data_type
self.attributes.folder_loc = folder_location
self.attributes.start_time = start_time
self.attributes.end_time = end_time
self.attributes.fore_start_time = fore_start_time
self.attributes.fore_end_time = fore_end_time
self.attributes.nbr_leadT = nbr_leadTimes
self.attributes.countries = countries
self.attributes.max_number_loc = max_number_loc
self.metadata = expando()
pass
#Function that check input countries and display an error message if they
#don't correspond. Returns the available countries and indices from nodes
def _check_countries(self):
self.metadata.network_nodes = pd.read_csv(self.attributes.folder_loc+'/Metadata/network_nodes.csv',
sep=',')
available_countries = set(self.metadata.network_nodes.country)
countries = self.attributes.countries
if bool(countries-available_countries.intersection(countries)):
print(', '.join(countries-available_countries.intersection(countries)) + \
' are not in the country list. Available countries: ' + ', '.join(available_countries))
self.attributes.countries = list(available_countries.intersection(countries))
ix_net_nodes_bool = np.in1d(self.metadata.network_nodes.country, self.attributes.countries)
self.metadata.ix_nodes = np.where(ix_net_nodes_bool)[0]+1
if self.attributes.max_number_loc != None and len(self.metadata.ix_nodes)>self.attributes.max_number_loc:
self.metadata.ix_nodes = np.sort(random.sample(list(self.metadata.ix_nodes),
self.attributes.max_number_loc))
print('The number of nodes selected was higher than the maximum number of locations (' +\
str(self.attributes.max_number_loc) + ') and therefore reduced.')
pass
#Function that loads observations and stores them in the 'obs' attribute of output
def _load_observations(self):
filename = self.attributes.folder_loc + '/Nodal_TS/' + self.attributes.renew_type + \
'_signal_' + self.attributes.data_type + '.csv'
data_observations_aux = pd.read_csv(filename, sep=',')
#Getting observations of training period
ix_time_bool = np.in1d(data_observations_aux.Time,
[self.attributes.start_time,self.attributes.end_time])
ix_time = np.where(ix_time_bool)[0]
if len(ix_time) == 1:
sys.exit('Training period contains only one element. ' + \
'There must be an error in the definition of starting/ending dates. ' + \
'Check day, month and year selected. Remember that data are available hourly only.')
ix_net_nodes = np.append(0, self.metadata.ix_nodes)
data_observations = data_observations_aux.ix[ix_time[0]:ix_time[len(ix_time)-1],
ix_net_nodes]
data_observations.Time = pd.to_datetime(data_observations.Time)
del ix_time_bool, ix_time
#Getting observations of testing period
ix_time_bool = np.in1d(data_observations_aux.Time,
[self.attributes.fore_start_time,self.attributes.fore_end_time])
ix_time = np.where(ix_time_bool)[0]
data_observations_cf = data_observations_aux.ix[ix_time[0]:ix_time[len(ix_time)-1],
ix_net_nodes]
data_observations_cf.Time = pd.to_datetime(data_observations_cf.Time)
#Define colnames with locations
new_col_names = [None] * len(data_observations.columns)
new_col_names[0] = 'Time'
for icol, col_name in enumerate(data_observations.columns[1:], start=1):
new_col_names[icol] = 'id_' + col_name
self.metadata.id_nodes = new_col_names[1:]
data_observations.columns = new_col_names
data_observations_cf.columns = new_col_names
data_observations.reset_index(drop=True, inplace=True)
data_observations_cf.reset_index(drop=True, inplace=True)
del data_observations_aux, filename
self.obs = data_observations
self.current_fore = expando()
self.current_fore.obs = data_observations_cf
pass
#Function that defines the time of day horizon of predictions/observations
#Dataset contains only hourly information but it can be adapted for other
#markets
def _tod_observations(self):
#Assumes an hourly discretisation of the day; adapt this if intraday
#markets or other time resolutions are to be considered
time_of_day = [time(ih,0,0,0) for ih in range(24)]
tod_name = [None] * len(time_of_day)
#define the partition of the day, used later when applying the climatology
for index,itime in enumerate(time_of_day):
if itime.hour<10: h_name= '0' + str(itime.hour)
else: h_name = str(itime.hour)
if itime.minute<10: min_name= '0' + str(itime.minute)
else: min_name = str(itime.minute)
tod_name[index] = 'h_'+ h_name + '_' + min_name
self.metadata.tod = time_of_day
self.metadata.tod_label = tod_name
pass
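# Example of the labels generated above (one per hour of the day):
# time(0, 0) -> 'h_00_00', time(13, 0) -> 'h_13_00'.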
#Function that loads predictions and stores them in the 'fore' attribute of output
def _load_forecasts(self):
#Define lead times labels
forecast_ahead = [None] * self.attributes.nbr_leadT
for leadT in range(1,self.attributes.nbr_leadT+1):
if leadT<10: nb_name= '0' + str(leadT)
else: nb_name = str(leadT)
forecast_ahead[leadT-1] = 'leadT_' + nb_name
self.metadata.fore_leadT = forecast_ahead
#loading of forecasts data under data_forecasts
data_forecasts = expando()
data_forecasts_cf = expando()
empty_df = pd.DataFrame(columns=self.obs.columns)
for leadT_name in self.metadata.fore_leadT:
setattr(data_forecasts, leadT_name, empty_df)
setattr(data_forecasts_cf, leadT_name, empty_df)
for iforecast in os.listdir(self.attributes.folder_loc + '/Nodal_FC/'):
iforecast_asDate = datetime(int(iforecast[:4]), int(iforecast[4:6]), int(iforecast[6:8]),
int(iforecast[8:]),0,0)
iforecast_asDate = iforecast_asDate.strftime("%Y-%m-%d %H:%M:%S")
if iforecast_asDate>=self.attributes.start_time and iforecast_asDate<=self.attributes.end_time:
filename = self.attributes.folder_loc + '/Nodal_FC/' + iforecast + \
'/' + self.attributes.renew_type + '_forecast.csv'
data_forecasts_aux = | pd.read_csv(filename, sep=',') | pandas.read_csv |
import numpy as np
import pandas as pd
import re
import math
from typing import Union
def get_all_entity(tuples: list) -> list:
entities = []
for _tuple in tuples:
if _tuple[1] in ["类型", "产地", "生产时间(s)"]:
if not "公式" in _tuple[0]:
entities.append(_tuple[0])
else:
entities.append(_tuple[0])
entities.append(_tuple[-1])
return list(set(entities))
def get_category_list(tuples:list) -> list:
category_list = []
entity_list = []
for _tuple in tuples:
if _tuple[1] == "类型":
category_list.append(_tuple)
entity_list.append(_tuple[0])
return category_list, entity_list
class DysonTuples:
def __init__(self, txt_path:str, extra_resources:list) -> None:
self.txt_path = txt_path
self.extra_resources = extra_resources
self.dyson_tuples = self.read_tuples_txt(txt_path)
self._all_formulas = self.get_all_formulas()
self._all_formulas_dict = {}
self._all_raw_products = self.get_all_raw_products(self.dyson_tuples)
for formula in self._all_formulas:
if formula in ["X射线裂解公式",'重氢公式(2)']:
continue
self._all_formulas_dict[formula] = self.get_formula_dict(formula)
self.remove_special_formula()
self._all_products_formulas_dict = self.get_all_products_formulas_dict(self._all_formulas_dict)
self._all_products_formulas_dict["氢"].remove("反物质公式")
self._all_products_formulas_dict["氢"].remove("等离子精炼公式")
def check_extra_resources(self):
for resource in self.extra_resources:
extra_resources = ["硫酸","光栅石","分型硅石","单级磁石","刺笋结晶","金伯利矿石","可燃冰"]
if resource not in extra_resources:
raise ValueError("extra_resource must in {}, but {} received".format(extra_resources, resource))
def read_tuples_txt(self, txt_path: str) -> list:
with open(txt_path, encoding="utf-8") as f:
contents = f.readlines()
tuples = []
for content in contents:
head, rel, tail = re.split("\t|\n", content)[:-1]
tuples.append((head, rel, tail))
return tuples
def get_all_products_formulas_dict(self, formulas_dict:dict) -> dict:
product_formula_dict = {}
for formula in formulas_dict.keys():
products = list(formulas_dict[formula]['产物'].keys())
for _product in products:
if _product not in product_formula_dict.keys():
product_formula_dict[_product] = [formula]
else:
product_formula_dict[_product].append(formula)
return product_formula_dict
def get_all_raw_products(self, tuples:list) -> list:
raw_products = []
for _tuple in tuples:
if _tuple[2] == "原料":
raw_products.append(_tuple[0])
return raw_products
def get_all_formulas(self) -> list:
formulas = []
for _tuple in self.dyson_tuples:
if _tuple[-1] == "生产公式":
formulas.append(_tuple[0])
return list(set(formulas))
def get_formula_dict(self, formula:str) -> dict:
temp_list = []
for _tuple in self.dyson_tuples:
if formula in _tuple:
temp_list.append(_tuple)
formula_dict = self._get_formula_dict_from_list(formula, temp_list)
return formula_dict
def _get_formula_dict_from_list(self, formula:str, formula_list:list) -> dict:
temp_dict = {'名称': formula,'原料':{},'产物':{}}
for _tuple in formula_list:
try:
count = int(_tuple[1])
if _tuple[0] == formula:
temp_dict['产物'][_tuple[-1]] = count
else:
temp_dict['原料'][_tuple[0]] = count
except:
try:
temp_dict[_tuple[1]] = int(_tuple[-1])
except:
temp_dict[_tuple[1]] = _tuple[-1]
return temp_dict
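# Editor's sketch (not part of the original module): shows the dict shape that
# _get_formula_dict_from_list builds. The tuples below are made up for
# illustration; only the parsing logic above is exercised.
def _example_formula_dict():
    dt = DysonTuples.__new__(DysonTuples)  # bypass __init__ for the demo
    demo = [("铁矿", "1", "铁块公式"), ("铁块公式", "1", "铁块"), ("铁块公式", "生产时间(s)", "1")]
    return dt._get_formula_dict_from_list("铁块公式", demo)
    # -> {'名称': '铁块公式', '原料': {'铁矿': 1}, '产物': {'铁块': 1}, '生产时间(s)': 1}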
def find_method(self, target_product:str) -> list:
'''
Return all tuple heads (e.g. formulas/sources) whose tail matches target_product.
'''
methods = []
for _tuple in self.dyson_tuples:
if _tuple[-1] == target_product:
methods.append(_tuple[0])
return methods
def remove_special_formula(self):
self.all_raw_products.append("硅石")
self.all_formulas_dict.pop("石材公式")
if "硫酸" in self.extra_resources:
self.all_formulas_dict.pop("硫酸公式")
self.all_raw_products.append("硫酸")
if "光栅石" in self.extra_resources:
self.all_formulas_dict.pop("光子合并器公式")
self.all_formulas_dict.pop("卡西米尔晶体公式")
else:
self.all_formulas_dict.pop("光子合并器公式(高效)")
self.all_formulas_dict.pop("卡西米尔晶体公式(高效)")
if "分型硅石" in self.extra_resources:
self.all_formulas_dict.pop("晶格硅公式")
else:
self.all_formulas_dict.pop("晶格硅公式(高效)")
if "单级磁石" in self.extra_resources:
self.all_formulas_dict.pop("粒子容器公式")
else:
self.all_formulas_dict.pop("粒子容器公式(高效)")
if "刺笋结晶" in self.extra_resources:
self.all_formulas_dict.pop("碳纳米管公式")
else:
self.all_formulas_dict.pop("碳纳米管公式(高效)")
if "金伯利矿石" in self.extra_resources:
self.all_formulas_dict.pop("金刚石公式")
else:
self.all_formulas_dict.pop("金刚石公式(高效)")
if "可燃冰" in self.extra_resources:
self.all_formulas_dict.pop("石墨烯公式")
else:
self.all_formulas_dict.pop("石墨烯公式(高效)")
@property
def all_formulas(self):
return self._all_formulas
@property
def all_formulas_dict(self):
return self._all_formulas_dict
@property
def all_raw_products(self):
return self._all_raw_products
@property
def all_products_formulas_dict(self):
return self._all_products_formulas_dict
class DysonTuplesAnalysis(DysonTuples):
def __init__(self, txt_path, extra_resources:list) -> None:
super(DysonTuplesAnalysis, self).__init__(txt_path, extra_resources)
def __call__(self, target_product:str, count:float, filter_station_num:int=np.inf):
plan_list = []
all_list = self._analysis_method(target_product, count, plan_list)
for i in range(len(all_list)):
# all_list[i] = self.analysis_result(all_list[i], filter_station_num)
all_list[i] = self.analysis_result_pivot_table(all_list[i])
return all_list
def analysis_result_pivot_table(self, plan_list:list):
plan_dict = {"公式":[],"产地":[],"数量":[]}
extra_dict = {"额外产物":[],"数量/s":[]}
for _plan in plan_list:
plan_dict["公式"].append(_plan["公式"])
plan_dict["产地"].append(_plan["产地"])
plan_dict["数量"].append(_plan["数量"])
if "额外产物" in _plan.keys():
for _extra_product in _plan["额外产物"]:
product_name = list(_extra_product.keys())[0]
extra_dict["额外产物"].append(product_name)
extra_dict["数量/s"].append(_extra_product[product_name])
plan_df = pd.DataFrame(plan_dict)
plan_pt = pd.pivot_table(plan_df,index=["公式","产地"],values=["数量"],aggfunc=np.sum)
extra_df = pd.DataFrame(extra_dict)
__author__ = 'lucabasa'
__version__ = '1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
from utilities import read_data
import feature_eng as fe
import feature_selection as fs
import model_selection as ms
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
agg_loc = 'processed_data/'
agg_name = 'total_aggregation_with_FE_0219.csv'
save_loc = 'results/stack_n_blend/'
model_list = {'lightGBM': ms.lightgbm_train,
'XGB': ms.xgb_train,
'lightGMBrf': ms.lightgbm_rf,
'RF': ms.rf_train,
'extra': ms.extratrees_train}
sel_list = {'only_hist': fs.sel_hist,
'only_new': fs.sel_new,
'only_money': fs.sel_money,
'only_counts': fs.sel_counts,
'no_money': fs.sel_nomoney,
'full': fs.sel_all}
def stack():
train = pd.read_csv('results/stack_n_blend/oof_predictions.csv')
del train['Unnamed: 0']
test = pd.read_csv('results/stack_n_blend/all_predictions.csv')
target = train['target']
id_to_sub = test.card_id
kfolds = KFold(5, shuffle=True, random_state=42)
predictions, cv_score, feat_imp, oof = ms.rf_train(train, test, target, kfolds)
print(f'random forest:\t {cv_score}')
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df['target'] = predictions
sub_df.to_csv('stack_rf.csv', index=False)
feat_imp.to_csv('stack_rf_featimp.csv', index=False)
predictions, cv_score, feat_imp, oof = ms.extratrees_train(train, test, target, kfolds)
print(f'Extra trees:\t {cv_score}')
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df['target'] = predictions
sub_df.to_csv('stack_extratrees.csv', index=False)
feat_imp.to_csv('stack_extratrees_featimp.csv', index=False)
predictions, cv_score, feat_imp, oof = ms.lightgbm_train(train, test, target, kfolds)
print(f'lightGBM:\t {cv_score}')
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df['target'] = predictions
sub_df.to_csv(save_loc + 'stack_lightgbm.csv', index=False)
feat_imp.to_csv(save_loc + 'stack_lightgbm_featimp.csv', index=False)
def blend():
train = pd.read_csv('results/stack_n_blend/oof_predictions.csv')
del train['Unnamed: 0']
test = pd.read_csv('results/stack_n_blend/all_predictions.csv')
target = train['target']
id_to_sub = test.card_id
kfolds = KFold(5, shuffle=True, random_state=42)
del train['target']
train['oof_score'] = train.mean(axis=1)
print('Full blend: ', mean_squared_error(train.oof_score, target)**0.5)
del train['oof_score']
scores = pd.read_csv('results/stack_n_blend/single_cvscores.csv')
scores = scores.rename(columns={'Unnamed: 0': 'models'})
for num in np.arange(1, 15):
best_blends = scores.sort_values(by='CV_score').head(num).models.values
train['oof_score'] = train[best_blends].mean(axis=1)
print(f'Best {num} blends: ', mean_squared_error(train.oof_score, target)**0.5)
del train['oof_score']
tot_score = scores.CV_score.sum()
for model in scores.models.unique():
train[model] = train[model] * (scores[scores.models == model].CV_score.values[0] / tot_score)
train['oof_score'] = train.sum(axis=1)
print('Weighted blend: ', mean_squared_error(train.oof_score, target)**0.5)
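# Editor's sketch (not part of the original script): the weighting arithmetic
# used in blend() above, on made-up CV scores; each model's OOF column is scaled
# by its share of the total CV score and the rows are summed.
def _example_weighted_blend():
    import pandas as pd
    scores = pd.DataFrame({'models': ['lightGBM_full', 'XGB_full'], 'CV_score': [3.65, 3.70]})
    oof = pd.DataFrame({'lightGBM_full': [0.1, -0.2], 'XGB_full': [0.3, 0.0]})
    tot_score = scores.CV_score.sum()
    for model in scores.models.unique():
        oof[model] = oof[model] * (scores[scores.models == model].CV_score.values[0] / tot_score)
    return oof.sum(axis=1)  # weighted blend per row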
def single_model():
train = read_data('raw_data/train.csv')
test = read_data('raw_data/test.csv')
df_tr = pd.read_csv(agg_loc + agg_name)
train = pd.merge(train, df_tr, on='card_id', how='left').fillna(0)
test = pd.merge(test, df_tr, on='card_id', how='left').fillna(0)
del df_tr
train = fe.combine_categs(train)
test = fe.combine_categs(test)
kfolds = KFold(5, shuffle=True, random_state=42)
results = {}
for_second_level = pd.DataFrame({'target': train['target']})
for model in model_list.keys():
to_train = model_list.get(model)
for selection in sel_list:
to_select = sel_list.get(selection)
print(f'{model}_{selection}')
df_train = train.copy()
df_test = test.copy()
target = df_train['target']
id_to_sub = df_test['card_id']
del df_train['target']
del df_train['card_id']
del df_test['card_id']
df_train, df_test = to_select(df_train, df_test)
predictions, cv_score, feat_imp, oof = to_train(df_train, df_test, target, kfolds)
results[model + '_' + selection] = cv_score
for_second_level[model + '_' + selection] = oof
sub_df = pd.DataFrame({"card_id":id_to_sub.values})
sub_df["target"] = predictions
sub_df.to_csv(save_loc + model + '_' + selection + '.csv', index=False)
feat_imp.to_csv(save_loc + model + '_' + selection + "_featimp.csv", index=False)
for_second_level.to_csv(save_loc + 'oof_predictions.csv')
print(f'{model}_{selection}:\t {cv_score}')
print('_'*40)
print('_'*40)
print('\n')
final = pd.DataFrame.from_dict(results, orient='index', columns=['CV_score'])
final.to_csv(save_loc + 'single_cvscores.csv')
for_second_level.to_csv(save_loc + 'oof_predictions.csv')
def stack_with_features():
train = read_data('raw_data/train.csv')
test = read_data('raw_data/test.csv')
df_tr = pd.read_csv(agg_loc + agg_name)
train = pd.merge(train, df_tr, on='card_id', how='left').fillna(0)
test = pd.merge(test, df_tr, on='card_id', how='left').fillna(0)
del df_tr
train = fe.combine_categs(train)
test = fe.combine_categs(test)
train = train[['card_id', 'target'] + [col for col in train.columns if 'purchase' in col or 'month' in col]]
test = test[['card_id'] + [col for col in train.columns if 'purchase' in col or 'month' in col]]
print(train.columns)
stacked = pd.read_csv('results/stack_n_blend/oof_predictions.csv')
import os
import io
import random
import json
import pandas as pd
def create_eval_files():
#test_file = 'data/rest_e2e/devset_e2e.csv'
test_file = 'data/rest_e2e/testset_e2e.csv'
#num_instances = 547 # for devset_e2e.csv
num_instances = 630 # for testset_e2e.csv
num_samples = 40
prediction_files = os.listdir('eval/predictions')
header = ['sample #', 'MR']
data = []
# generate random sample of indexes
sample_idxs = random.sample(range(0, num_instances), num_samples)
data.append(sample_idxs)
# map the filenames onto random numbers
file_idxs_random = [i for i in range(len(prediction_files))]
random.shuffle(file_idxs_random)
files_dict = {}
for i, filename in enumerate(prediction_files):
files_dict[file_idxs_random[i]] = filename
# store the file index map into a file to be used as a key
with open('eval/file_map.json', 'w') as f_file_map:
json.dump(files_dict, f_file_map, indent=4, sort_keys=True)
# sample the MRs
data_frame_test = pd.read_csv(test_file, header=0, encoding='utf8')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 14:24:15 2022
@author: shshyam
"""
from importlib.resources import path
import h5py
import boto3
from botocore.handlers import disable_signing
from os import walk
import os
import pandas as pd
from geopy import distance
from geopy import Point
import gcsfs
import numpy as np
fs=gcsfs.GCSFileSystem(project="sevir-project-bdia",token="cloud_storage_creds.json")
def searchincache(lat,long,distlimit):
print('In Search cache function')
cache_file=fs.open("gs://sevir-data-2/sevir_cache.csv",'rb')
cache = pd.read_csv(cache_file)
myloc=Point(lat,long)
cache['distance']=cache.apply(lambda row: distancer(row,myloc), axis=1)
cache=cache[cache["distance"] < int(distlimit)]
if cache.empty:
return 'N',None,None
else:
cache=cache.sort_values(by='distance')
fileloc=cache.iloc[0]['image_location']
timestamp=cache.iloc[0]['timestamp']
print("Searched and found:",lat,":",long,":",fileloc)
return 'Y',timestamp,fileloc
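# Editor's note: `distancer` is called above but not defined in this excerpt.
# A minimal sketch consistent with that call (row-wise geodesic distance to
# `myloc`, using the geopy imports at the top of this file) is given below; the
# 'latitude'/'longitude' column names are assumptions.
def distancer_sketch(row, myloc):
    return distance.distance(Point(row['latitude'], row['longitude']), myloc).km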
def searchgeocoordinates(approxlat,approxlong,distlimit):
print('In search GeoCoordinates function')
catalog = pd.read_csv("https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv")
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from app.db.psql import db, session
from app.models.etf import EtfFundFlow, \
get_etf_fund_flows, \
get_sector_tickers
def create_flow_data(tickers, start, end):
## Using convention for return identification:
## Simple returns denoted with capital R
## log returns identified by lowercase r
etf_fund_flows = get_etf_fund_flows(tickers, start, end)
etf_fund_flows['daily_R'] = etf_fund_flows['nav'].groupby(level='ticker').pct_change()
etf_fund_flows['daily_r'] = np.log(etf_fund_flows['nav']).groupby(level=0).diff()
etf_fund_flows['flow'] = np.log(etf_fund_flows['shares_outstanding']).groupby(level=0).diff()
etf_fund_flows['mktcap'] = etf_fund_flows['nav'] * etf_fund_flows['shares_outstanding']
etf_fund_flows.dropna(inplace=True)
return etf_fund_flows
def calc_etf_return(df):
avg_daily_r = df['daily_r'].mean()
annual_ret_log = avg_daily_r * 252
annual_ret_simple = np.exp(annual_ret_log) - 1
return annual_ret_simple * 100
def calc_investor_return(df):
flows = df['flow'] * (df['mktcap'] / df['mktcap'].iloc[0])
flows.iloc[0] = 1
basis = flows.cumsum()
avg_daily_r = (df['daily_r'] * basis / basis.mean()).mean()
annual_ret_log = avg_daily_r * 252
annual_ret_simple = np.exp(annual_ret_log) - 1
return annual_ret_simple * 100
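# Editor's sketch (not part of the original module): runs the two return
# calculations above on a tiny synthetic frame (all values made up) to show the
# expected columns: 'daily_r' (log return), 'flow' (log share change), 'mktcap'.
def _example_returns():
    df = pd.DataFrame({
        'daily_r': [0.001, -0.002, 0.0015],
        'flow': [0.0, 0.01, -0.005],
        'mktcap': [100.0, 101.0, 100.5],
    })
    return calc_etf_return(df), calc_investor_return(df)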
def compare_annual(df):
tickers = df.index.get_level_values(0).unique().tolist()
out = pd.DataFrame()
import os
import pandas as pd
def get_lst_images(file_path):
"""
Reads in all files from file path into a list.
INPUT
file_path: specified file path containing the images.
OUTPUT
List of image strings
"""
return [i for i in os.listdir(file_path) if i != '.DS_Store']
if __name__ == '__main__':
data = pd.read_csv("../data/Data_Entry_2017.csv")
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
"""
Library of standardized plotting functions for basic plot formats
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
from scipy.interpolate import interp1d
from scipy.signal import welch
# Standard field labels
standard_fieldlabels = {'wspd': r'Wind speed [m/s]',
'wdir': r'Wind direction $[^\circ]$',
'u': r'u [m/s]',
'v': r'v [m/s]',
'w': r'Vertical wind speed [m/s]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{Km/s}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2/s^2}]$',
}
# Standard field labels for frequency spectra
standard_spectrumlabels = {'u': r'$E_{uu}\;[\mathrm{m^2/s}]$',
'v': r'$E_{vv}\;[\mathrm{m^2/s}]$',
'w': r'$E_{ww}\;[\mathrm{m^2/s}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2/s}]$',
}
# Default color cycle
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Supported dimensions and associated names
dimension_names = {
'time': ['time','Time','datetime'],
'height': ['height','heights','z'],
'frequency': ['frequency','f',]
}
# Show debug information
debug = False
def plot_timeheight(datasets,
fields=None,
fig=None,ax=None,
colorschemes={},
fieldlimits=None,
heightlimits=None,
timelimits=None,
fieldlabels={},
labelsubplots=False,
showcolorbars=True,
fieldorder='C',
ncols=1,
subfigsize=(12,4),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time-height contours for different datasets and fields
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are MultiIndex Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal ndatasets*nfields
colorschemes : str or dict
Name of colorschemes. If only one field is plotted, colorschemes
can be a string. Otherwise, it should be a dictionary with
entries <fieldname>: name_of_colorschemes
Missing colorschemess are set to 'viridis'
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool
Label subplots as (a), (b), (c), ...
showcolorbars : bool
Show colorbar per subplot
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets to axes grid
(row by row). Fields is considered the first axis, so 'C' means
fields change slowest, 'F' means fields change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool
Plot dual x axes with both UTC time and local time
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets and
fields and can not be used to set dataset or field specific
limits, colorschemes, norms, etc.
Example uses include setting shading, rasterized, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
colorschemes=colorschemes,
fieldorder=fieldorder
)
args.set_missing_fieldlimits()
nfields = len(args.fields)
ndatasets = len(args.datasets)
ntotal = nfields * ndatasets
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Initialise list of colorbars
cbars = []
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height')
timevalues = _get_dim_values(df,'time')
assert(heightvalues is not None), 'timeheight plot needs a height axis'
assert(timevalues is not None), 'timeheight plot needs a time axis'
if isinstance(timevalues, pd.DatetimeIndex):
# If plot local time, shift timevalues
if plot_local_time:
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Convert to days since 0001-01-01 00:00 UTC, plus one
numerical_timevalues = mdates.date2num(timevalues.values)
else:
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# Timevalues is already a numerical array
numerical_timevalues = timevalues
# Create time-height mesh grid
tst = _get_staggered_grid(numerical_timevalues)
zst = _get_staggered_grid(heightvalues)
Ts,Zs = np.meshgrid(tst,zst,indexing='xy')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
# Store plotting options in dictionary
plotting_properties = {
'vmin': args.fieldlimits[field][0],
'vmax': args.fieldlimits[field][1],
'cmap': args.cmap[field]
}
# Index of axis corresponding to dataset i and field j
if args.fieldorder=='C':
axi = i*nfields + j
else:
axi = j*ndatasets + i
# Extract data from dataframe
fieldvalues = _get_pivoted_field(df_pivot,field)
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
im = axv[axi].pcolormesh(Ts,Zs,fieldvalues.T,**plotting_properties)
# Colorbar mark up
if showcolorbars:
cbar = fig.colorbar(im,ax=axv[axi],shrink=1.0)
# Set field label if known
try:
cbar.set_label(args.fieldlabels[field])
except KeyError:
pass
# Save colorbar
cbars.append(cbar)
# Set title if more than one dataset
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
if not heightlimits is None:
axv[-1].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align time, height and color labels
_align_labels(fig,axv,nrows,ncols)
if showcolorbars:
_align_labels(fig,[cb.ax for cb in cbars],nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots:
for i,axi in enumerate(axv):
axi.text(-0.14,1.0,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Return cbar instead of array if ntotal==1
if len(cbars)==1:
cbars=cbars[0]
if plot_local_time and ax2 is not None:
return fig, ax, ax2, cbars
else:
return fig, ax, cbars
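# Editor's sketch (not part of the original library): minimal usage of
# plot_timeheight, assuming data laid out with a (time, height) MultiIndex and a
# 'wspd' column; the exact layout accepted depends on the _get_dim_values /
# _get_pivot_table helpers defined elsewhere in this module.
def _example_plot_timeheight():
    times = pd.date_range('2020-01-01', periods=24, freq='1h')
    heights = np.array([10.0, 50.0, 100.0])
    index = pd.MultiIndex.from_product([times, heights], names=['time', 'height'])
    df = pd.DataFrame({'wspd': np.random.rand(len(index)) * 10.0}, index=index)
    fig, ax, cbar = plot_timeheight({'example': df}, fields='wspd')
    return fig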
def plot_timehistory_at_height(datasets,
fields=None,
heights=None,
fig=None,ax=None,
fieldlimits=None,
timelimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
ncols=1,
subfigsize=(12,3),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time history at specified height(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple heights are
stacked in a single subplot. When multiple datasets and multiple
heights are specified together, heights are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
heights : float, list, 'all' (or None)
Height(s) for which time history is plotted. heights can be
None if all datasets combined have no more than one height
value. 'all' means the time history for all heights in the
datasets will be plotted (in this case all datasets should
have the same heights)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or nheights)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking heights
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by heights. If
None, stack_by_datasets will be set based on the number of heights
and datasets.
labelsubplots : bool
Label subplots as (a), (b), (c), ...
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool
Plot dual x axes with both UTC time and local time
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and heights, and they can not be used to set dataset,
field or height specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
# Avoid FutureWarning concerning the use of an implicitly registered
# datetime converter for a matplotlib plotting method. The converter
# was registered by pandas on import. Future versions of pandas will
# require explicit registration of matplotlib converters, as done here.
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
args = PlottingInput(
datasets=datasets,
fields=fields,
heights=heights,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
nheights = len(args.heights)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if nheights>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields*nheights
else:
ntotal = nfields*ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and nheights>1):
showlegend = True
else:
showlegend = False
# Loop over datasets and fields
for i,dfname in enumerate(args.datasets):
df = args.datasets[dfname]
timevalues = _get_dim_values(df,'time',default_idx=True)
assert(timevalues is not None), 'timehistory plot needs a time axis'
heightvalues = _get_dim_values(df,'height')
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# If plot local time, shift timevalues
if plot_local_time and isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# If any of the requested heights is not available,
# pivot the dataframe to allow interpolation.
# Pivot all fields in a dataset at once to reduce computation time
if (not heightvalues is None) and (not all([h in heightvalues for h in args.heights])):
df_pivot = _get_pivot_table(df,'height',available_fields)
pivoted = True
if debug: print('Pivoting '+dfname)
else:
pivoted = False
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, height in enumerate(args.heights):
# Store plotting options in dictionary
# Set default linestyle to '-' and no markers
plotting_properties = {
'linestyle':'-',
'marker':None,
}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and height k
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple heights are compared
if nheights>1:
axv[axi].set_title('z = {:.1f} m'.format(height),fontsize=16)
# Set colors
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
axi = i*nfields + j
# Use height as label
if showlegend:
plotting_properties['label'] = 'z = {:.1f} m'.format(height)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(nheights-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if pivoted:
signal = interp1d(heightvalues,_get_pivoted_field(df_pivot,field).values,axis=-1,fill_value="extrapolate")(height)
else:
slice_z = _get_slice(df,height,'height')
signal = _get_field(slice_z,field).values
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
axv[axi].plot(timevalues,signal,**plotting_properties)
# Set field label if known
try:
axv[axi].set_ylabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set axis grid
for axi in axv:
axi.xaxis.grid(True,which='minor')
axi.yaxis.grid(True)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
# Number sub figures as a, b, c, ...
if labelsubplots:
for i,axi in enumerate(axv):
axi.text(-0.14,1.0,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
# Align labels
_align_labels(fig,axv,nrows,ncols)
if plot_local_time and ax2 is not None:
return fig, ax, ax2
else:
return fig, ax
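# Editor's sketch (not part of the original library): minimal usage of
# plot_timehistory_at_height with the same assumed (time, height) MultiIndex
# layout as the sketch above; the field name and height are arbitrary examples.
def _example_plot_timehistory():
    times = pd.date_range('2020-01-01', periods=48, freq='30min')
    heights = np.array([10.0, 80.0])
    index = pd.MultiIndex.from_product([times, heights], names=['time', 'height'])
    df = pd.DataFrame({'wspd': np.random.rand(len(index)) * 12.0}, index=index)
    fig, ax = plot_timehistory_at_height({'example': df}, fields='wspd', heights=[80.0])
    return fig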
def plot_profile(datasets,
fields=None,
times=None,
fig=None,ax=None,
fieldlimits=None,
heightlimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
fieldorder='C',
ncols=None,
subfigsize=(4,5),
datasetkwargs={},
**kwargs
):
"""
Plot vertical profile at specified time(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple times are
stacked in a single subplot. When multiple datasets and multiple
times are specified together, times are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
times : str, int, float, list (or None)
Time(s) for which vertical profiles are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or ntimes)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking times
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by times. If
None, stack_by_datasets will be set based on the number of times
and datasets.
labelsubplots : bool
Label subplots as (a), (b), (c), ...
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets/times (depending
on stack_by_datasets) to axes grid (row by row). Fields is considered the
first axis, so 'C' means fields change slowest, 'F' means fields
change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
fieldorder=fieldorder,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if ntimes>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields * ntimes
else:
ntotal = nfields * ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=int(ntotal/nfields),
fieldorder=args.fieldorder,
avoid_single_column=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.4,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and ntimes>1):
showlegend = True
else:
showlegend = False
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height',default_idx=True)
assert(heightvalues is not None), 'profile plot needs a height axis'
timevalues = _get_dim_values(df,'time')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
if not timevalues is None:
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and time k
if args.fieldorder == 'C':
axi = j*ntimes + k
else:
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple times are compared
if ntimes>1:
if isinstance(time, (int,float,np.number)):
tstr = '{:g} s'.format(time)
else:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
axv[axi].set_title(tstr, fontsize=16)
# Set color
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
if args.fieldorder == 'C':
axi = j*ndatasets + i
else:
axi = i*nfields + j
# Use time as label
if showlegend:
if isinstance(time, (int,float,np.number)):
plotting_properties['label'] = '{:g} s'.format(time)
else:
plotting_properties['label'] = pd.to_datetime(time)
from sklearn.metrics import *
from tqdm import tqdm
import pandas as pd
import time
from .models import *
from .databunch import DataBunch
from .encoders import *
##################################### BestSingleModel ################################################
class BestSingleModel(XGBoost):
"""
Trying to find which model works best on our data
Args:
params (dict or None): parameters for model.
If None default params are fetched.
"""
def _make_model(self, model_name, databunch=None, model_param=None, wrapper_params=None,):
'''
Make new model and choose model library from 'model_name'
'''
if databunch is None:
databunch=self._data
model = all_models[model_name](
databunch=databunch,
cv = self._cv,
score_cv_folds = self._score_cv_folds,
opt_lvl=self._opt_lvl,
metric=self.metric,
combined_score_opt=self._combined_score_opt,
metric_round=self._metric_round,
model_param=model_param,
wrapper_params=wrapper_params,
gpu=self._gpu,
random_state=self._random_state,
type_of_estimator=self.type_of_estimator
)
return(model)
def _opt_model(self, trial):
'''
now we can choose models in optimization
'''
model_name = trial.suggest_categorical('model_name', self.models_names)
model = self._make_model(model_name,)
model.model_param = model.get_model_opt_params(trial=trial,
model=model,
opt_lvl=model._opt_lvl,
metric_name=model.metric.__name__,
)
return(model)
def opt(self,
timeout=1000,
early_stoping=100,
cold_start=100,
direction='maximize',
opt_lvl=3,
cv=None,
score_cv_folds=None,
auto_parameters=True,
models_names=None, #list models_names for opt
feature_selection=True,
verbose=1,
):
'''
Custom opt function with a list of model names (models_names);
now we can choose models in the optimization
'''
if cold_start is not None:
self._cold_start = cold_start
if self.direction is None:
self.direction = direction
if opt_lvl is not None:
self._opt_lvl = opt_lvl
if cv is not None:
self._cv = cv
if score_cv_folds is not None:
self._score_cv_folds = score_cv_folds
if auto_parameters is not None:
self._auto_parameters = auto_parameters
if models_names is None:
self.models_names = all_models.keys()
else:
self.models_names = models_names
# Opt
history = self._opt_core(
timeout,
early_stoping,
feature_selection,
verbose)
return(history)
def _predict_preproc_model(self, model_cfg, model,):
"""
Custom function for predict; rebuilds the model from its saved config so the model library can be chosen
"""
model = self._make_model(model_cfg['model_name'], databunch=self._data)
model.model_param = model_cfg['model_param']
model.wrapper_params = model_cfg['wrapper_params']
return(model)
class BestSingleModelClassifier(BestSingleModel):
type_of_estimator='classifier'
class BestSingleModelRegressor(BestSingleModel):
type_of_estimator='regression'
##################################### ModelsReview ################################################
class ModelsReview(BestSingleModel):
"""
ModelsReview...
"""
__name__ = 'ModelsReview'
def fit(self,
models_names=None,
verbose=1,
):
"""
Fit models (in list models_names) with default params
"""
history_fits = pd.DataFrame()
if models_names is None:
self.models_names = all_models.keys()
else:
self.models_names = models_names
if verbose > 0:
disable_tqdm = False
else:
disable_tqdm = True
for model_name in tqdm(self.models_names, disable=disable_tqdm):
# Model
model_tmp = all_models[model_name](databunch=self._data,
cv=self._cv,
score_cv_folds = self._cv,
metric=self.metric,
direction=self.direction,
metric_round=self._metric_round,
combined_score_opt=self._combined_score_opt,
gpu=self._gpu,
random_state=self._random_state,
type_of_estimator=self.type_of_estimator)
# fit
config = model_tmp.fit()
history_fits = history_fits.append(config, ignore_index=True)
model_tmp = None
self.history_trials_dataframe = history_fits
return(history_fits)
def opt(self,
timeout=1000,
early_stoping=100,
auto_parameters=False,
feature_selection=True,
direction=None,
verbose=1,
models_names=None,
):
if direction is not None:
self.direction = direction
if self.direction is None:
raise Exception('Need direction to optimize!')
if models_names is None:
self.models_names = all_models.keys()
else:
self.models_names = models_names
self.history_trials_dataframe = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ahocorasick
import math
import os
import re
import sys
import shutil
import glob
import xlsxwriter
import subprocess
from functools import partial
from itertools import product, combinations
from subprocess import DEVNULL
from multiprocessing import Pool
from threading import Timer
import random
import pandas as pd
import tqdm
import primer3
from Bio import SeqIO, Seq
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Production"
class PRIMeval:
def __init__(self, run_id, max_primer_mismatches, max_probe_mismatches, max_product_size, cross_check, probes_only, method, dimer_check,
primerMonovalentCations, primerDivalentCations, primerDNTPs, primerConcentration, primerAnnealingTemp,
probeMonovalentCations, probeDivalentCations, probeDNTPs, probeConcentration, probeAnnealingTemp, prebuilt = ""):
# parameters
self.run_id = run_id
self.max_primer_mismatches = int(max_primer_mismatches)
self.max_probe_mismatches = int(max_probe_mismatches)
self.max_product_size = int(max_product_size)
self.max_mismatches = max(max_primer_mismatches, max_probe_mismatches)
if self.max_mismatches == 0:
self.l, self.e, self.qcov, self.perciden = 5, 10, 100, 100
elif self.max_mismatches == 1:
self.l, self.e, self.qcov, self.perciden = 5, 40, 90, 90
elif self.max_mismatches == 2:
self.l, self.e, self.qcov, self.perciden = 5, 70, 85, 85
else:
self.l, self.e, self.qcov, self.perciden = 5, 100, 80, 80
self.prebuilt = str(prebuilt)
self.bowtie_dbs = ["list_of_prebuilt_dbs_in_prebuilt_folder"]
self.bowtie_runs = []
self.blast_db_name = "user_db"
self.bowtie_index_name = "bindex"
self.num_threads = 48
self.method = method
if dimer_check == "True":
self.dimer_check = True
else:
self.dimer_check = False
if cross_check == "True":
self.same_package = False
else:
self.same_package = True
if probes_only == "True":
self.probes_only = True
else:
self.probes_only = False
# Cross dimer check
self.primer_monovalent_cations = str(primerMonovalentCations)
self.primer_divalent_cations = str(primerDivalentCations)
self.primer_dntps = str(primerDNTPs)
self.primer_annealing_oligo = str(primerConcentration)
self.primer_annealing_temp = str(primerAnnealingTemp)
self.probe_monovalent_cations = str(probeMonovalentCations)
self.probe_divalent_cations = str(probeDivalentCations)
self.probe_dntps = str(probeDNTPs)
self.probe_annealing_oligo = str(probeConcentration)
self.probe_annealing_temp = str(probeAnnealingTemp)
self.cross_dimer_dfs = []
self.cross_dimer_dfs_dg = []
self.hairpin_dfs = []
# Aho-Corasick Automaton
self.aho = ahocorasick.Automaton()
# folders
self.base_folder = os.getcwd() + "/"
self.run_folder = self.base_folder + "runs/" + str(self.run_id) + "/"
self.input_folder = self.run_folder + "input/"
self.output_folder = self.run_folder + "output/"
self.tmp_folder = self.run_folder + "tmp/"
self.input_contigs = self.run_folder + "input/contigs/"
self.primer_input_folder = self.run_folder + "input/primers/"
self.probes_input_folder = self.run_folder + "input/probes/"
self.blast_db_folder = self.run_folder + "tmp/blastdb/"
self.prebuilt_genomes = self.base_folder + "prebuilt/genomes/"
self.prebuilt_bowtie = self.base_folder + "prebuilt/bowtie/"
# files
self.output_contigs = self.run_folder + "tmp/merged_contigs.fasta"
self.blast_output_tmp_file = self.run_folder + "tmp/blast_tmp_results.txt"
self.blast_output_file = self.run_folder + "tmp/blast_results.txt"
self.bowtie_output_tmp_file = self.run_folder + "tmp/bowtie_tmp_results.txt"
self.bowtie_output_file = self.run_folder + "tmp/bowtie_results.txt"
self.bowtie_index_folder = self.run_folder + "tmp/bowtie_index_folder/"
self.oligo_file = self.run_folder + "output/oligos.fasta"
self.results_all = self.run_folder + "output/results.csv"
self.results_wob = self.run_folder + "output/results_wobbled.csv"
self.results_dimers = self.run_folder + "output/results_dimers.xlsx"
# settings
self.blastdb_cmd = "/path/to/makeblastdb"
self.bowtie_build_cmd = "/path/to/bowtie-build"
self.blast_cmd = "/path/to/blastn"
self.bowtie_cmd = "/path/to/bowtie"
self.faidx_cmd = "/path/to/samtools faidx "
self.pd_col_hits = ["Sequence", "Type", "Name", "Package", "StartPos", "EndPos", "MismatchesTotal",
"Strand", "HitSequence", "Tm", "dG"]
self.pd_col_results = ["Sequence", "Contig", "Primer1", "Primer2", "Probe", "Primer1Package",
"Primer2Package", "ProbePackage", "StartPos1", "EndPos1", "StartPos2", "EndPos2",
"StartPos3", "EndPos3", "Primer1Tm", "Primer2Tm", "ProbeTm", "Primer1dG", "Primer2dG", "ProbedG", "ProductSize", "ProductTm", "NoMismatchesLeft", "NoMismatchesRight",
"NoMismatchesProbe", "MismatchesLeft", "MismatchesRight", "MismatchesProbe",
"Comment", "Product"]
self.blast_txt_params = "\"6 qseqid sseqid nident qlen length mismatch qstart qend sstart sseq sstrand " \
"send\""
self.blast_txt_fields = ["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend",
"sstart", "sseq", "sstrand", "send"]
# return list of all possible sequences given an ambiguous DNA input
def _extend_ambiguous_dna(self, seq):
d = Seq.IUPAC.IUPACData.ambiguous_dna_values
return list(map("".join, product(*map(d.get, seq))))
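    # Editor's note (illustrative example only): the expansion enumerates every
    # concrete sequence for an ambiguous input, e.g. for "AYG" (Y = C or T):
    # self._extend_ambiguous_dna("AYG") -> ['ACG', 'ATG']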
def _get_sequence(self, contig_file, wanted_contig, start, end, strand=1):
try:
command = self.faidx_cmd + contig_file + " '" + wanted_contig + ":" + str(start) + "-" + str(end) + "'"
call = subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL).decode().split("\n", 1)[1]
except:
try:
contig_file = self.prebuilt_genomes + wanted_contig.split("__contigname__", 1)[0] + ".fasta"
command = self.faidx_cmd + contig_file + " '" + wanted_contig + ":" + str(start) + "-" + str(end) + "'"
call = subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL).decode().split("\n", 1)[1]
except:
sys.exit("Failed retrieving: " + command)
call = re.sub("\n|\r", "", call)
sequence = Seq.Seq(call)
if strand == 1:
return sequence.upper()
else:
return sequence.reverse_complement().upper()
# Get a visual representation of mismatches between two sequences
def _mismatch_visualization(self, seq_a, seq_b):
seq_a, seq_b = seq_a.upper(), seq_b.upper()
mismatches = ""
if (len(seq_a) - len(seq_b) != 0):
return "Error"
for pos in range(0, len(seq_a)):
if seq_a[pos] != seq_b[pos]:
mismatches += "(" + seq_a[pos] + "/" + seq_b[pos] + ")"
else:
mismatches += "="
return mismatches
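    # Editor's note (illustrative example only): for seq_a="ACGT" and seq_b="ACTT"
    # the method returns "==(G/T)=" -- '=' marks matching positions and "(a/b)"
    # shows the query/hit bases at each mismatch.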
def _prepare_folders(self):
if os.path.exists(self.output_folder):
shutil.rmtree(self.output_folder)
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
# Create output and tmp folders
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if not os.path.exists(self.tmp_folder):
os.makedirs(self.tmp_folder)
if not os.path.exists(self.bowtie_index_folder):
os.makedirs(self.bowtie_index_folder)
def _clean_up_folders(self):
# if os.path.exists(self.input_folder):
# shutil.rmtree(self.input_folder)
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
# Rename primer and probes, create new sequences without IUPAC codes and save in file
# Used for dimer check: self.packages, packages, package, oligo_name
def _import_oligos(self, folder, oligotype):
packages = {}
primer_records = []
allowed_chars = "[^0-9a-zA-Z()'_\+-]+"
for file in os.listdir(folder):
if file.endswith(".fasta"):
package = file.rsplit(".fasta", 1)[0]
packages[package] = {}
sequences = SeqIO.parse(open(folder + file), "fasta")
for fasta in sequences:
m = re.search("[M,R,W,S,Y,K,V,H,D,B,N]", str(fasta.seq))
if m:
sequence_mutations = self._extend_ambiguous_dna(str(fasta.seq))
mutation_count = 0
for mutation in sequence_mutations:
mutation_count += 1
oligo_name = re.sub(allowed_chars, "_", fasta.description) + "_mut" + str(mutation_count)
packages[package][oligo_name] = str(mutation)
if oligotype == "probe":
rec = SeqRecord(Seq.Seq(mutation, IUPAC),
id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_mut" + str(
mutation_count) + "_probe", description="")
else:
rec = SeqRecord(Seq.Seq(mutation, IUPAC),
id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_mut" + str(
mutation_count), description="")
primer_records.append(rec)
else:
oligo_name = re.sub(allowed_chars, "_", fasta.description)
packages[package][oligo_name] = str(fasta.seq)
if oligotype == "probe":
rec = SeqRecord(fasta.seq, id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_probe",
description="")
else:
rec = SeqRecord(fasta.seq,
id=package + "^" + re.sub(allowed_chars, "_", fasta.description),
description="")
primer_records.append(rec)
output_handle = open(self.oligo_file, "a")
SeqIO.write(primer_records, output_handle, "fasta")
output_handle.close()
if oligotype == "primer":
self.primer_packages = packages
else:
self.probe_packages = packages
# Rename and merge contigs
def _import_contigs(self):
seq_records = []
for file in os.listdir(self.input_contigs):
# CHANGE: other file endings should also be possible (see with Django upload permitted filenames)
if file.endswith(".fasta"):
base_contig_name = file.replace(".fasta", "")
for entry in SeqIO.parse(self.input_contigs + file, "fasta"):
my_new_id = base_contig_name + "__contigname__" + entry.id
seq_records.append(SeqRecord(entry.seq, id=my_new_id, description=""))
output_handle = open(self.output_contigs, "w")
SeqIO.write(seq_records, output_handle, "fasta")
output_handle.close()
command = self.faidx_cmd + self.output_contigs
subprocess.call(command, shell=True)
def _import_sequences(self):
if self.probes_only == False:
self._import_oligos(self.primer_input_folder, "primer")
self._import_oligos(self.probes_input_folder, "probe")
self._import_contigs()
def _create_blast_db(self):
command = self.blastdb_cmd + " -in " + self.output_contigs + " -dbtype nucl -out " + self.blast_db_folder + self.blast_db_name
subprocess.call(command, shell=True)
def _create_bowtie_index(self):
command = self.bowtie_build_cmd + " --threads " + str(
self.num_threads) + " -f " + self.output_contigs + " " + self.bowtie_index_folder + self.bowtie_index_name
subprocess.call(command, shell=True)
def _blast_call(self):
command = self.blast_cmd + " -db " + self.blast_db_folder + self.blast_db_name + " -query " + self.oligo_file + " -out " + \
self.blast_output_tmp_file + " -outfmt " + self.blast_txt_params + " -num_threads " + str(
self.num_threads) + " -evalue 200000 " \
"-qcov_hsp_perc " + str(self.qcov) + " -perc_identity " + str(self.perciden) + " -max_target_seqs 2000000 -word_size 4 -ungapped"
subprocess.call(command, shell=True)
with open(self.blast_output_file, "a") as out_file:
with open(self.blast_output_tmp_file) as in_file:
out_file.write(in_file.read())
def _bowtie_call(self, index_folder = "", index_name = ""):
mismatches = self.max_primer_mismatches if self.max_primer_mismatches >= self.max_probe_mismatches else self.max_probe_mismatches
if index_folder == "" and index_name == "":
if os.path.getsize(self.output_contigs) == 0:
return
index_folder = self.bowtie_index_folder
index_name = self.bowtie_index_name
command = self.bowtie_cmd + " -f -a -p " + str(self.num_threads) + " -n " + str(
mismatches) + " -l " + str(self.l) + " -e " + str(self.e) + " " + index_folder + index_name + " " + self.oligo_file + " " + self.bowtie_output_tmp_file
subprocess.call(command, shell=True)
with open(self.bowtie_output_file, "a") as out_file:
with open(self.bowtie_output_tmp_file) as in_file:
out_file.write(in_file.read())
def _specificity_calls(self):
for db in self.bowtie_runs:
self._bowtie_call(self.prebuilt_bowtie, db)
def _multiprocess_convert_bowtie_to_blast(self):
# in case no hits are returned
try:
df = pd.read_csv(self.bowtie_output_file, sep="\t", header=None)
except pd.errors.EmptyDataError:
df = pd.DataFrame(columns = ["", "", "", "", "", "", "", "", "", "", "", ""])
split_df = self.splitDataFrameIntoChunks(df)
func = partial(self._convert_bowtie_to_blast)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, split_df), total=len(split_df)))
self.df_bowtie = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
self.df_bowtie = pd.concat(multiprocessing_results, ignore_index=True)
def _convert_bowtie_to_blast(self, df):
df_bowtie = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
for index, line in df.iterrows():
mismatch = str(line[7]).count(":")
if line[0].endswith("_probe") and mismatch > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and mismatch > self.max_primer_mismatches:
continue
sstrand = "plus" if line[1] == "+" else "minus"
qseqid = line[0]
sseqid = line[2]
qlen = len(line[4])
length = qlen
qstart = 1
qend = qlen
sstart = int(line[3]) + 1
send = sstart + qlen - 1
nident = qlen - mismatch
if sstrand == "minus":
temp_swap = sstart
sstart = send
send = temp_swap
if mismatch == 0:
sseq = str(Seq.Seq(line[4]).reverse_complement())
else:
sseq = self._resolve_bowtie_mismtches(line[4], line[7], -1)
else:
if mismatch == 0:
sseq = line[4]
else:
sseq = self._resolve_bowtie_mismtches(line[4], line[7], 1)
df_bowtie.loc[len(df_bowtie)] = [str(qseqid), str(sseqid), str(nident), str(qlen), str(length),
str(mismatch), str(qstart),
str(qend), str(sstart), str(sseq), str(sstrand), str(send)]
return df_bowtie
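# Reconstruct the reference-side sequence of a hit from the read sequence and
# bowtie's "offset:REF>read" mismatch descriptors: each mismatched position is
# patched with the reference base (the character before '>'); minus-strand hits
# are handled by reverse-complementing the sequence and the substituted base.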
def _resolve_bowtie_mismtches(self, sequence, mismatches, strand):
sequence = Seq.Seq(sequence) if strand == 1 else Seq.Seq(sequence).reverse_complement()
mismatches = mismatches.split(",")
for mismatch in mismatches:
position, base = mismatch.split(":", 1)
position = int(position)
base = base[0] if strand == 1 else Seq.Seq(base[0]).reverse_complement()
sequence = sequence[:position] + base + sequence[position+1:]
return str(sequence)
def _split_output(self):
if self.method == "blast":
# in case no hits are returned
try:
df_blast = pd.read_csv(self.blast_output_file, sep="\t", header=None)
except pd.errors.EmptyDataError:
df_blast = pd.DataFrame(columns = ["", "", "", "", "", "", "", "", "", "", "", ""])
self.df_blast_split = self.splitDataFrameIntoChunks(df_blast)
if self.method == "bowtie":
self.df_bowtie_split = self.splitDataFrameIntoChunks(self.df_bowtie)
if self.method == "aho-corasick":
self.df_aho_split = self.splitDataFrameIntoChunks(self.df_aho)
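# Split a DataFrame into roughly num_threads chunks for the worker pool,
# e.g. 10 rows on 4 threads -> chunk size ceil(10/4) = 3 -> chunks of 3, 3, 3, 1.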
def splitDataFrameIntoChunks(self, df):
chunkSize = math.ceil(len(df) / self.num_threads)
if chunkSize == 0:
chunkSize = 1
listOfDf = list()
numberChunks = len(df) // chunkSize + 1
for i in range(numberChunks):
listOfDf.append(df[i * chunkSize:(i + 1) * chunkSize])
return listOfDf
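# Parse the BLAST/bowtie/aho-corasick hit tables in parallel into a unified
# hits frame and write all oligo hits (with the internal "__contigname__" and
# "_probe" suffixes split off again) to all_hits.csv.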
def _multiprocess_split_files(self):
if self.method == "blast":
input_files = self.df_blast_split
if self.method == "bowtie":
input_files = self.df_bowtie_split
if self.method == "aho-corasick":
input_files = self.df_aho_split
func = partial(self._parse_blastlike_results_df)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, input_files), total=len(input_files)))
self.hits = pd.concat(multiprocessing_results, ignore_index=True)
hits_output = self.hits.copy()
if len(hits_output.index) > 0:
hits_output[['Sequence', 'Contig']] = hits_output['Sequence'].str.split("__contigname__", 1, expand=True)
hits_output = hits_output[
['Sequence', 'Contig', 'Type', 'Name', 'Package', 'StartPos', 'EndPos', 'MismatchesTotal', 'Strand',
'HitSequence', 'Tm', 'dG']]
tmp = hits_output['Name'].str.rsplit("_probe", 1, expand = True)
hits_output['Name'] = tmp[0]
hits_output.to_csv(self.output_folder + "all_hits.csv", index=False, sep=";")
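# Probes-only reporting: annotate each probe hit with a mismatch visualisation
# against the full probe sequence, write the complete table, and separate hits
# of "_mut" (wobbled) probe variants from hits of the unmodified probes.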
def _process_probes_only(self):
probes_df = self.hits[(self.hits['Type'] == "Probe")]
if len(probes_df.index) > 0:
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
probes_df = probes_df.drop(columns = ['Type', 'Strand'])
probes_df = probes_df.rename(columns = {'Name': 'Probe', 'Package': 'ProbePackage', 'MismatchesTotal': 'NoMismatchesProbe'})
probes_df[['Sequence', 'Contig']] = probes_df['Sequence'].str.split("__contigname__", 1, expand = True)
probes_df['MismatchesProbe'] = probes_df.apply(lambda x: self._mismatch_visualization(oligos_full_sequences[x['ProbePackage'] + "^" + x['Probe']].seq, x['HitSequence']), axis=1)
probes_df = probes_df[['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'NoMismatchesProbe', 'MismatchesProbe', 'HitSequence', 'Tm' ,'dG']]
tmp = probes_df['Probe'].str.rsplit("_probe", 1, expand = True)
probes_df['Probe'] = tmp[0]
probes_df.to_csv(self.results_all, index=False, sep=";")
# parse wobbled primers
subset = probes_df[probes_df['Probe'].str.contains("_mut")]
subset_r = subset.replace(['_mut([0-9])+'], [''], regex=True)
# hits without mutations
unique = probes_df.merge(subset, indicator=True, how="outer")
unique = unique[unique['_merge'] == 'left_only']
unique = unique.drop("_merge", axis=1)
results2 = pd.DataFrame(columns=['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'NoMismatchesProbe', 'MismatchesProbe', 'HitSequence', 'Tm', 'dG'])