| prompt | completion | api |
| --- | --- | --- |
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
import numpy as np
import pandas as pd
# Rpy
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('suppressMessages(library(selectiveInference)); suppressMessages(library(knockoff))') # R libraries we will use
rpy.r("""
estimate_sigma_data_splitting = function(X,y, verbose=FALSE){
nrep = 10
sigma_est = 0
nest = 0
for (i in 1:nrep){
n=nrow(X)
m=floor(n/2)
subsample = sample(1:n, m, replace=FALSE)
leftover = setdiff(1:n, subsample)
CV = cv.glmnet(X[subsample,], y[subsample], standardize=FALSE, intercept=FALSE, family="gaussian")
beta_hat = coef(CV, s="lambda.min")[-1]
selected = which(beta_hat!=0)
if (verbose){
print(c("nselected", length(selected)))
}
if (length(selected)>0){
LM = lm(y[leftover]~X[leftover,][,selected])
sigma_est = sigma_est+sigma(LM)
nest = nest+1
}
}
return(sigma_est/nest)
}
""")
def gaussian_setup(X, Y, run_CV=True):
"""
Some calculations that can be reused by methods:
lambda.min, lambda.1se, lambda.theory and Reid et al. estimate of noise
"""
n, p = X.shape
Xn = X / np.sqrt((X**2).sum(0))[None, :]
numpy2ri.activate()
rpy.r.assign('X', X)
rpy.r.assign('Y', Y)
numpy2ri.deactivate()
rpy.r('X=as.matrix(X)')
rpy.r('Y=as.numeric(Y)')
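    # The next line builds the "theoretical" lasso tuning parameter l_theory from a
    # Monte Carlo sample of |Xn^T eps| over 500 standard-normal noise draws on the
    # column-normalized design, replicated across all p coordinates.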
l_theory = np.fabs(Xn.T.dot(np.random.standard_normal((n, 500)))).max(1).mean() * np.ones(p)
if run_CV:
numpy2ri.activate()
rpy.r.assign('X', X)
rpy.r.assign('Y', Y)
rpy.r('X=as.matrix(X)')
rpy.r('Y=as.numeric(Y)')
rpy.r('G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r('sigma_reid = selectiveInference:::estimate_sigma(X, Y, coef(G, s="lambda.min")[-1]) # sigma via Reid et al.')
rpy.r("L = G[['lambda.min']]")
rpy.r("L1 = G[['lambda.1se']]")
L = rpy.r('L')
L1 = rpy.r('L1')
sigma_reid = rpy.r('sigma_reid')[0]
numpy2ri.deactivate()
return L * np.sqrt(X.shape[0]) * 1.0001, L1 * np.sqrt(X.shape[0]) * 1.0001, l_theory, sigma_reid
else:
return None, None, l_theory, None
def BHfilter(pval, q=0.2):
numpy2ri.activate()
rpy.r.assign('pval', np.asarray(pval))
rpy.r.assign('q', q)
rpy.r('Pval = p.adjust(pval, method="BH")')
rpy.r('S = which((Pval < q)) - 1')
S = rpy.r('S')
numpy2ri.deactivate()
    return np.asarray(S, int)
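# For reference, a NumPy-only sketch of what the R calls above do
# (p.adjust(pval, method="BH") followed by which(Pval < q) - 1); BHfilter itself
# keeps using R, this function is purely illustrative.
def BHfilter_numpy(pval, q=0.2):
    pval = np.asarray(pval, dtype=float)
    m = len(pval)
    order = np.argsort(pval)
    # BH adjusted p-value: running minimum of p_(i) * m / i taken from the largest rank down
    adjusted = np.empty(m)
    adjusted[order] = np.minimum.accumulate(
        (pval[order] * m / np.arange(1, m + 1))[::-1])[::-1]
    return np.nonzero(adjusted < q)[0]   # 0-based indices of selected hypotheses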
def summarize(groupings,
results_df,
summary):
grouped = results_df.groupby(groupings, as_index=False)
    summaries = [(n, summary(g)) for n, g in grouped]
    summary_df = pd.concat([s for _, s in summaries])
#!/usr/bin/env python3
"""
File name: netsim.py
Author: <NAME>
email: <EMAIL>
Date created: 02/09/2017 (DD/MM/YYYY)
Python Version: 3.5
Description:
Core module which generates the physical network of sticks that is used to
produce the electrical network. The total physical and electrical network is contained in the RandomConductingNetwork class. The specific class RandomCNTNetwork is a special case of RandomConductingNetwork.
"""
import argparse, os, time,traceback,sys
import numpy as np
import pandas as pd
import matplotlib
from cnet import ConductionNetwork, Resistor, FermiDiracTransistor, LinExpTransistor
import networkx as nx
import scipy.spatial as spatial
from timeit import default_timer as timer
from datetime import datetime
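# Minimal illustrative sketch (not used by the class below) of the percolation test
# that RandomConductingNetwork.make_graph performs further down: build a graph from
# the intersects table and ask whether source (stick 0) and drain (stick 1) fall in
# the same connected component.
def _percolates(intersects):
    """intersects: DataFrame with 'stick1' and 'stick2' columns, as produced by
    make_intersects_kdtree; sticks 0 and 1 are assumed to be the source and drain."""
    graph = nx.from_pandas_edgelist(intersects, source='stick1', target='stick2')
    return any((0 in c) and (1 in c) for c in nx.connected_components(graph))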
class RandomConductingNetwork(object):
"""
"""
def __init__(self, n=2,scaling=5, l='exp', pm=0.135 , fname='', directory='data', notes='', seed=0,
onoffmap=0, element = LinExpTransistor):
self.scaling=scaling
self.n=n
self.pm=pm
self.l=l
self.notes=notes
self.directory=directory
self.percolating=False
self.onoffmap=onoffmap
self.element=element
#seeds are included to ensure proper randomness on distributed computing
if seed:
self.seed=seed
else:
self.seed=np.random.randint(low=0,high=2**32)
np.random.seed(self.seed)
if not(fname):
self.sticks, self.intersects = self.make_intersects_kdtree( self.make_sticks(n, l=l, pm=pm, scaling=scaling))
self.make_cnet()
self.fname=self.make_fname()
else:
self.fname=fname
self.load_system(os.path.join(directory,fname))
def get_info(self):
print('=== input parameters ===')
print('number of sticks: {}'.format(self.n))
print('stick length : {} \u00b1 {} \u03bcm'.format(0.66,0.44))
print('percentage metallic: {} %'.format(self.pm))
print('\n=== physical network characteristics ===')
print('device region: {}x{} \u03bcm'.format(self.scaling,self.scaling))
print('stick density: {} sticks/\u03bcm^2'.format(self.n/self.scaling**2))
print('number of clusters: {}'.format(len(self.clustersizes)))
print('size of stick clusters: {:.2f} \u00b1 {:.2f} sticks'.format( self.clustersizes.mean(), self.clustersizes.std()))
print('maximum cluster size: {} sticks'.format( self.clustersizes.max()))
print('\n=== electrical characteristics ===')
print("device conducting: {}".format(self.percolating))
if self.percolating:
print('driving voltage: {} V'.format(self.cnet.vds))
current=sum(self.cnet.source_currents)
currentlist=nx.to_pandas_edgelist(self.cnet.graph).current
currentmean=currentlist.mean()
currentvar=currentlist.std()
print('device current: {:.2f} A'.format(current))
print('current variation (std dev across sticks): {:.2e} \u00b1 {:.2e} A'.format(currentmean, currentvar))
return [self.n, self.scaling, self.n/self.scaling**2, len(self.clustersizes), self.clustersizes.mean(), self.clustersizes.std(), self.clustersizes.max(),self.percolating, self.cnet.vds, current,currentmean, currentvar, self.fname, self.seed]
def check_intersect(self, s1,s2):
        # quick rejection: if stick 1 lies entirely to the left of and below stick 2, the sticks cannot intersect
if max(s1[:,0])<min(s2[:,0]) and max(s1[:,1])<min(s2[:,1]):
return False # intervals do not overlap
#gradients
m1=(s1[0,1]-s1[1,1])/(s1[0,0]-s1[1,0])
m2=(s2[0,1]-s2[1,1])/(s2[0,0]-s2[1,0])
#intercepts
b1=s1[0,1]-m1*s1[0,0]
b2=s2[0,1]-m2*s2[0,0]
if m1==m2:
return False #lines are parallel
#xi,yi on both lines
xi=(b2-b1)/(m1-m2)
yi=(b2*m1-b1*m2)/(m1-m2)
if min(s1[:,0])<xi<max(s1[:,0]) and min(s2[:,0])<xi<max(s2[:,0]):
return [xi,yi]
else:
return False
def get_distance(self,p1,p2):
return np.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2)
def get_ends(self, row):
xc,yc,angle,length = row[0],row[1],row[2],row[3]
x1=xc-length/2*np.cos(angle)
x2=xc+length/2*np.cos(angle)
y1=yc+length/2*np.sin(angle)
y2=yc-length/2*np.sin(angle)
return np.array([ [x1,y1],[x2,y2] ])
def make_stick(self,l=None,kind='s',pm=0,scaling=1):
"""makes a stick with [xc, yc, angle, length, kind, endarray]
the end array is of the form [ [x1,y1],[x2,y2] ]"""
if np.random.rand()<=pm:
kind='m'
if type(l)!=str:
stick=[np.random.rand(), np.random.rand(), np.random.rand()*2*np.pi, l/scaling,kind]
elif l=='exp':
stick= [np.random.rand(), np.random.rand(), np.random.rand()*2*np.pi, abs(np.random.normal(0.66,0.44))/scaling,kind]
else:
print('invalid L value')
stick.append(self.get_ends(stick))
return stick
def make_sticks(self, n,**kwargs):
# adds a vertical source and drain stick on left and right respectively
source=[0.01, 0.5,np.pi/2-1e-6,100,'v']
source.append(self.get_ends(source))
drain=[.99, 0.5,np.pi/2-1e-6,100,'v']
drain.append(self.get_ends(drain))
return pd.DataFrame( [source]+[self.make_stick(**kwargs) for i in range(n)]+[drain] ,columns=[ "xc", "yc", "angle", "length",'kind', "endarray"])
# return pd.DataFrame( [self.make_stick(**kwargs) for i in range(n)] ,columns=[ "xc", "yc", "angle", "length",'kind', "endarray"])
def make_intersects_kdtree(self,sticks):
sticks['cluster']=sticks.index
sticks.sort_values('length',inplace=True,ascending=False)
sticks.reset_index(drop=True,inplace=True)
intersects=[]
X=sticks.loc[:,'xc':'yc'].values
endpoints=sticks.endarray.values
kinds=sticks.kind.values
lengths=sticks.length.values
tree=spatial.KDTree(X)
for i in range(len(sticks)):
neighbors = tree.query_ball_point(X[i],lengths[i])
for j in neighbors:
# ensures no double counting and self counting
if i<j:
intersection=self.check_intersect(endpoints[i],endpoints[j])
if intersection and 0<=intersection[0]<=1 and 0<=intersection[1]<=1:
intersects.append([i,j,*intersection, kinds[i]+kinds[j]],)
intersects=pd.DataFrame(intersects, columns=["stick1",'stick2','x','y','kind'])
return sticks, intersects
def make_trivial_sticks(self):
source=[0.01, 0.5,np.pi/2-1e-6,1.002,'m']
source.append(self.get_ends(source))
drain=[.99, 0.5,np.pi/2-1e-6,1.001,'m']
drain.append(self.get_ends(drain))
st1=[0.3, 0.5,np.pi/4,1,'s']
st1.append(self.get_ends(st1))
st2=[0.7, 0.5,-np.pi/4,1,'s']
st2.append(self.get_ends(st2))
st3=[0.5, 0.5,-np.pi/4,0.1,'s']
st3.append(self.get_ends(st3))
st4=[0.5, 0.5,np.pi/4,0.1,'s']
st4.append(self.get_ends(st4))
sticks=pd.DataFrame([source]+[st1]+[st2]+[st3]+[st4]+[drain],columns=[ "xc", "yc", "angle", "length",'kind', "endarray"])
self.sticks, self.intersects = self.make_intersects_kdtree(sticks)
self.make_cnet()
def make_graph(self):
# only calculates the conduction through the spanning cluster of sticks
# to avoid the creation of a singular adjacency matrix caused by
# disconnected junctions becoming unconnected nodes in the cnet
self.graph=nx.from_pandas_edgelist(self.intersects, source='stick1',target='stick2',edge_attr=True)
for c in nx.connected_components(self.graph):
if (0 in c) and (1 in c):
self.percolating=True
connected_graph=self.graph.subgraph(c)
if self.percolating:
self.ground_nodes=[1]
self.voltage_sources=[[0,0.1]]
self.populate_graph(self.onoffmap)
for node in connected_graph.nodes():
connected_graph.nodes[node]['pos'] = [self.sticks.loc[node,'xc'], self.sticks.loc[node,'yc']]
for edge in connected_graph.edges():
connected_graph.edges[edge]['pos'] = [connected_graph.edges[edge]['x'], connected_graph.edges[edge]['y']]
return connected_graph
else:
return False,False,False
def populate_graph(self,onoffmap):
for edge in self.graph.edges():
self.graph.edges[edge]['component']=self.element( self.graph.edges[edge]['kind'], onoffmap )
def label_clusters(self):
i=0
components=nx.connected_components(self.graph)
clustersizes=[]
for c in components:
clustersizes.append(len(c))
for n in c:
self.sticks.loc[n,'cluster']=i
i+=1
self.clustersizes=np.array(clustersizes)
def make_cnet(self):
try:
connected_graph=self.make_graph()
assert self.percolating, "The network is not conducting!"
self.cnet=ConductionNetwork(connected_graph,self.ground_nodes,self.voltage_sources)
self.cnet.set_global_gate(0)
# self.cnet.set_local_gate([0.5,0,0.16,0.667], 10)
self.cnet.update()
except:
connected_graph=self.make_graph()
traceback.print_exc(file=sys.stdout)
pass
def timestamp(self):
return datetime.now().strftime('%y-%m-%d_%H%M%S_%f')
def make_fname(self):
self.notes="{}_{}sticks_{}x{}um_{}L_{}".format( self.seed,self.n,self.scaling,self.scaling,self.l,self.notes)
fname=os.path.join(self.directory,self.notes)
return fname
def save_system(self,fname=False):
#saves the sticks DataFrame
if not(fname):
fname=self.fname
self.sticks.to_csv(fname+'_sticks.csv')
#saves the intersects dataframe
self.intersects.to_csv(fname+'_intersects.csv')
#save the graph object
# nx.write_yaml(self.graph,self.fname+'_graph.yaml')
def load_system(self,fname,network=True):
# need to incorporate intelligent filename reading if we want
        # to be able to display files without manually inputting scaling
# print("loading sticks")
        self.sticks = pd.read_csv(fname+'_sticks.csv', index_col=0)
import pandas as pd
import numpy as np
import datetime
import time
import math
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import black_litterman
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.black_litterman import BlackLittermanModel
from statsmodels.tsa.arima_model import ARIMA
def filter(init, source, asset_arr=[1, 2, 3, 4], geo_arr=[7, 2, 3, 5, 4, 6, 1], score=3):
# Filter according to user's rank
asset_class = ["Equity", "Fixed Income",
"Mixed Allocation", "Money Market"]
geo_class = ["Africa& Middle West Region", "Asian Pacific Region", "European Region", "Greater China",
"International", "Latin American Region", "U.S."]
fund_num = init.shape[0]
filter_re = []
for i in range(0, fund_num):
asset_tmp = init['Asset Class'][i]
geo_tmp = init['Geographical Focus'][i]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and (geo_tmp == geo_class[geo_arr[0] - 1] or geo_tmp == geo_class[geo_arr[1] - 1] or geo_tmp == geo_class[geo_arr[2] - 1] or geo_tmp == geo_class[geo_arr[3] - 1])):
filter_re.append(init['ISIN'][i])
# If number of the funds filted is smaller than 100(can be specified), choose again
fund_filted_min = 100
for i in range(4, 7):
if (len(filter_re) < fund_filted_min):
for j in range(0, fund_num):
asset_tmp = init['Asset Class'][j]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and geo_class[geo_arr[i] - 1] == init['Geographical Focus'][j]):
filter_re.append(init['ISIN'][j])
else:
break
# data: names after filter + their risks
    data = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# finpie - a simple library to download some financial data
# https://github.com/peterlacour/finpie
#
# Copyright (c) 2020 <NAME>
#
# Licensed under the MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import pandas as pd
import datetime as dt
from finpie.base import DataBase
class YahooData( DataBase ):
def __init__(self, ticker):
DataBase.__init__(self)
self.ticker = ticker
def key_metrics(self):
'''
'''
url = f'https://finance.yahoo.com/quote/{self.ticker}/key-statistics?p={self.ticker}'
soup = self._get_session(url)
df = pd.concat( [ pd.read_html( str(t) )[0].transpose()
for t in soup.find_all('table')[1:] ], axis = 1 )
df.columns = df.iloc[0]
df = df[1:]
df.reset_index(inplace = True, drop = True)
df.columns = [c[:-1].strip().replace(' ', '_').replace('/', '')
if c.strip()[-1].isdigit() else c.strip() \
.replace(' ', '_').replace('/', '') for c in df.columns ]
df['ticker'] = self.ticker
df.replace(',', '', regex = True, inplace = True)
df = self._col_to_float(df)
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.index = [pd.to_datetime(dt.datetime.today().date())]
df.index.name = 'date'
return df
def _download(self, url):
'''
'''
soup = self._get_session(url)
tempDict = {}
lineitems = soup.find_all('div', class_ = "D(tbr)")
for l in lineitems:
temp = l.find_all('div', class_ = 'Ta(c)')
tempList = []
for t in temp:
tempList.append(t.text)
tempDict[ l.find('div', class_ = 'Ta(start)').text ] = tempList
cols = [ c.find('div', class_ = 'Ta(start)').text for c in lineitems ]
df = pd.DataFrame(tempDict, columns = cols )
df = df.loc[:,~df.columns.duplicated()]
df.replace(',','', regex = True, inplace = True)
df.replace('-', np.nan, inplace = True)
df['ticker'] = self.ticker
df.columns = [ col.replace(',','').replace('(','').replace(')','') \
.replace('&','and').replace('/','_').replace(' ', '_' )
for col in df.columns ]
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.replace(',', '', regex = True, inplace = True)
df = df[ df.breakdown != 'ttm' ]
df.index = pd.to_datetime(df.breakdown)
df.index.name = 'date'
df.drop('breakdown', axis = 1, inplace = True)
return self._col_to_float(df)
def cashflow_statement(self):
url = "https://finance.yahoo.com/quote/" + self.ticker + "/cash-flow?p=" + self.ticker
try:
df = self._download(url)
return df
except:
print(f'Download failed. Ticker {self.ticker} may not be available.')
def income_statement(self):
url = "https://finance.yahoo.com/quote/" + self.ticker + "/financials?p=" + self.ticker
try:
df = self._download(url)
return df
except:
print(f'Download failed. Ticker {self.ticker} may not be available.')
def balance_sheet(self):
url = "https://finance.yahoo.com/quote/" + self.ticker + "/balance-sheet?p=" + self.ticker
try:
df = self._download(url)
return df
except:
print(f'Download failed. Ticker {self.ticker} may not be available.')
def statements(self):
'''
'''
incomeStatement = self.income_statement()
balanceSheet = self.balance_sheet()
cashflowStatement = self.cashflow_statement()
return incomeStatement, balanceSheet, cashflowStatement
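    # Illustrative usage of this class (requires network access and depends on Yahoo
    # Finance keeping a page layout compatible with these parsers); the ticker is an
    # arbitrary example:
    #     yd = YahooData('AAPL')
    #     income, balance, cashflow = yd.statements()
    #     metrics = yd.key_metrics()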
def earnings_estimate(self):
url = f'https://finance.yahoo.com/quote/{self.ticker}/analysis'
soup = self._get_session(url)
df = pd.read_html( str( soup.find('table') ) )[0].transpose()
df.reset_index(inplace = True)
df.columns = ['reference_date'] + [c[:-1].strip().replace(' ', '_').replace('/', '')
if c.strip()[-1].isdigit() else c.strip().replace(' ', '_').replace('/', '')
for c in df.iloc[0][1:].values.tolist()]
df = df[1:]
df.iloc[:, 1:] = df.iloc[:, 1:]
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.replace(',', '', regex = True, inplace = True)
df.columns = [ col.lower().replace(' ', '_') for col in df.columns ]
df.index = [pd.to_datetime( dt.datetime.today().date() )] * len(df)
df.index.name = 'date'
return self._col_to_float(df)
def earnings_history(self):
url = f'https://finance.yahoo.com/quote/{self.ticker}/analysis'
soup = self._get_session(url)
df = pd.read_html( str( soup.find_all('table')[2] ) )[0].transpose()
df.reset_index(inplace = True)
df.columns = ['reference_date'] + [c[:-1].strip().replace(' ', '_').replace('/', '')
if c.strip()[-1].isdigit() else c.strip().replace(' ', '_').replace('/', '')
for c in df.iloc[0][1:].values.tolist()]
df = df[1:]
df.replace(',', '', regex = True, inplace = True)
df = self._col_to_float(df)
df.iloc[:, 1:] = df.iloc[:, 1:]
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.columns = [ col.lower().replace(' ', '_') for col in df.columns ]
df.index = [pd.to_datetime( dt.datetime.today().date() )] * len(df)
df.index.name = 'date'
return df
def revenue_estimates(self):
url = f'https://finance.yahoo.com/quote/{self.ticker}/analysis'
soup = self._get_session(url)
df = pd.read_html( str( soup.find_all('table')[1] ) )[0].transpose()
df.reset_index(inplace = True)
df.columns = ['reference_date'] + [c[:-1].strip().replace(' ', '_').replace('/', '')
if c.strip()[-1].isdigit() else c.strip().replace(' ', '_').replace('/', '')
for c in df.iloc[0][1:].values.tolist()]
df = df[1:]
df.replace(',', '', regex = True, inplace = True)
df = self._col_to_float(df)
df.iloc[:, 1:] = df.iloc[:, 1:]
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.columns = [ col.lower().replace(' ', '_') for col in df.columns ]
df.index = [pd.to_datetime( dt.datetime.today().date() )] * len(df)
df.index.name = 'date'
return df
def growth_estimates(self):
url = f'https://finance.yahoo.com/quote/{self.ticker}/analysis'
soup = self._get_session(url)
df = pd.read_html( str( soup.find_all('table')[-1] ) )[0].transpose()
df.reset_index(inplace = True)
df.columns = ['reference_date'] + [c[:-1].strip().replace(' ', '_').replace('/', '')
if c.strip()[-1].isdigit() else c.strip().replace(' ', '_').replace('/', '')
for c in df.iloc[0][1:].values.tolist()]
df = df[1:]
df = df.astype('str')
df.replace(',', '', regex = True, inplace = True)
df = self._col_to_float(df)
df.iloc[:, 1:] = df.iloc[:, 1:].astype('float')
df = df.transpose()
df.columns = df.iloc[0]
df = df[1:]
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.columns = [ col.lower().replace(' ', '_') for col in df.columns ]
df.index = [pd.to_datetime( dt.datetime.today().date() )] * len(df)
df.index.name = 'date'
return df
def earnings_estimate_trends(self):
url = f'https://finance.yahoo.com/quote/{self.ticker}/analysis'
soup = self._get_session(url)
df = pd.read_html( str( soup.find_all('table')[3] ) )[0].transpose()
df.reset_index(inplace = True)
df.columns = ['reference_date'] + [c[:-1].strip().replace(' ', '_').replace('/', '')
if c.strip()[-1].isdigit() else c.strip().replace(' ', '_').replace('/', '')
for c in df.iloc[0][1:].values.tolist()]
df = df[1:]
df.replace(',', '', regex = True, inplace = True)
df = self._col_to_float(df)
df.iloc[:, 1:] = df.iloc[:, 1:].astype('float')
df.columns = [ col.replace(' ', '_').replace('/','_').replace('.', '').replace(',', '').replace('&', 'and').lower() for col in df.columns ]
df.columns = [ col.lower().replace(' ', '_') for col in df.columns ]
df.index = [pd.to_datetime( dt.datetime.today().date() )] * len(df)
df.index.name = 'date'
return df
def esg_score(self):
'''
'''
url = f'https://finance.yahoo.com/quote/{self.ticker}/sustainability?p={self.ticker}'
soup = self._get_session(url)
section = soup.find(attrs = {'data-test': 'qsp-sustainability'})
df = pd.DataFrame( {
'total_esg_risk_score': float(section.find('div', string = 'Total ESG Risk score').find_next('div').find_next('div').text),
'risk_category': section.find('div', string = 'Total ESG Risk score').find_next('div').find_next('div').find_next('div').find_next('div').text,
'risk_percentile': section.find('div', string = 'Total ESG Risk score').find_next('div').find_next('div').find_next('span').text.replace(' percentile', ''),
'environment_risk_score': float(section.find('div', string = 'Environment Risk Score').find_next('div').find_next('div').text),
'social_risk_score': float(section.find('div', string = 'Social Risk Score').find_next('div').find_next('div').text),
'governance_risk_score': float(section.find('div', string = 'Governance Risk Score').find_next('div').find_next('div').text),
#'controversy_level': float(section.find('span', string = 'Controversy Level').find_next('div', class_ = 'Mt(15px)').find_next('div').find_next('div').find_next('div').find_next('div').find_next('div').text),
'ticker' : self.ticker }, index = [0] )
df.index = [pd.to_datetime( dt.datetime.today().date() )]
df.index.name = 'date'
return df
def corporate_governance_score(self):
'''
'''
url = f'https://finance.yahoo.com/quote/{self.ticker}/profile?p={self.ticker}'
soup = self._get_session(url)
temp = { i.split(':')[0].replace('The pillar scores are', '').strip(): i.split(':')[1].replace('.', '').strip() for i in soup.find_all('section')[-1].find_all('span')[3].text.split(';') }
temp['quality_score'] = soup.find_all('section')[-1].find_all('span')[1].text.replace('.','')[-2:].strip()
df = pd.DataFrame(temp, index = [0])
df['ticker'] = self.ticker
df['date'] = dt.datetime.today().date()
df.columns = [ col.lower().replace(' ', '_') for col in df.columns ]
df.replace(',', '', regex = True, inplace = True)
df = self._col_to_float(df)
df.index = [pd.to_datetime( dt.datetime.today().date() )]
df.index.name = 'date'
return df
def profile(self):
url = f'https://finance.yahoo.com/quote/{self.ticker}/profile?p={self.ticker}'
soup = self._get_session(url)
try:
no_of_employees = int( soup.find('span', string = 'Full Time Employees').find_next('span').text.replace(',', '') )
except:
no_of_employees = np.nan
df = pd.DataFrame( { 'company_name': soup.find_all('section')[1].find('h3').text,
'sector': soup.find('span', string = 'Sector(s)').find_next('span').text,
'industry': soup.find('span', string = 'Industry').find_next('span').text,
'number_of_employees': no_of_employees,
'description': soup.find('h2', string = 'Description').find_next('p').text,
'ticker': self.ticker }, index = [0] )
df.index = [pd.to_datetime( dt.datetime.today().date() )]
df.index.name = 'date'
return df
def executives_info(self):
'''
'''
url = f'https://finance.yahoo.com/quote/{self.ticker}/profile?p={self.ticker}'
soup = self._get_session(url)
df = pd.read_html( str( soup.find('table') ) )[0]
df['Gender'] = [ 'male' if 'Mr.' in n else 'female' for n in df.Name ]
        df['Age_at_end_of_year'] = [ dt.datetime.today().year - int(y) if not pd.isnull(y)
import copy
import numpy as np
import pandas as pd
from powersimdata.utility.distance import haversine
class TransformGrid:
"""Transforms grid according to operations listed in change table."""
def __init__(self, grid, ct):
"""Constructor
:param powersimdata.input.grid.Grid grid: a Grid object.
:param dict ct: change table.
"""
self.grid = copy.deepcopy(grid)
self.ct = copy.deepcopy(ct)
self.gen_types = [
"biomass",
"coal",
"dfo",
"geothermal",
"ng",
"nuclear",
"hydro",
"solar",
"wind",
"wind_offshore",
"other",
]
self.thermal_gen_types = ["coal", "dfo", "geothermal", "ng", "nuclear"]
def get_grid(self):
"""Returns the transformed grid.
:return: (*powersimdata.input.grid.Grid*) -- a Grid object.
"""
if bool(self.ct):
self._apply_change_table()
return self.grid
def _apply_change_table(self):
"""Apply changes listed in change table to the grid."""
# First scale by zones, so that zone factors are not applied to additions.
for g in self.gen_types:
if g in self.ct.keys():
self._scale_gen_by_zone(g)
if f"{g}_cost" in self.ct.keys():
self._scale_gencost_by_zone(g)
if f"{g}_pmin" in self.ct.keys():
self._scale_gen_pmin_by_zone(g)
if "branch" in self.ct.keys():
self._scale_branch_by_zone()
# Then, add new elements
if "new_bus" in self.ct.keys():
self._add_bus()
if "new_branch" in self.ct.keys():
self._add_branch()
if "new_dcline" in self.ct.keys():
self._add_dcline()
if "new_plant" in self.ct.keys():
self._add_gen()
if "storage" in self.ct.keys():
self._add_storage()
# Scale by IDs, so that additions can be scaled.
for g in self.gen_types:
if g in self.ct.keys():
self._scale_gen_by_id(g)
if f"{g}_cost" in self.ct.keys():
self._scale_gencost_by_id(g)
if f"{g}_pmin" in self.ct.keys():
self._scale_gen_pmin_by_id(g)
if "branch" in self.ct.keys():
self._scale_branch_by_id()
if "dcline" in self.ct.keys():
self._scale_dcline()
# Finally, remove elements (so that removal doesn't cause downstream errors)
if "remove_branch" in self.ct.keys():
self._remove_branch()
if "remove_bus" in self.ct.keys():
self._remove_bus()
def _scale_gen_by_zone(self, gen_type):
"""Scales capacity of generators, by zone. Also scales the associated generation
cost curve (to maintain the same slopes at the start/end of the curve).
:param str gen_type: type of generator.
"""
if "zone_id" in self.ct[gen_type].keys():
for zone_id, factor in self.ct[gen_type]["zone_id"].items():
plant_id = (
self.grid.plant.groupby(["zone_id", "type"])
.get_group((zone_id, gen_type))
.index.tolist()
)
self._scale_gen_capacity(plant_id, factor)
if gen_type in self.thermal_gen_types:
self._scale_gencost_by_capacity(plant_id, factor)
def _scale_gen_by_id(self, gen_type):
"""Scales capacity of generators by ID. Also scales the associated generation
cost curve (to maintain the same slopes at the start/end of the curve).
:param str gen_type: type of generator.
"""
if "plant_id" in self.ct[gen_type].keys():
for plant_id, factor in self.ct[gen_type]["plant_id"].items():
self._scale_gen_capacity(plant_id, factor)
if gen_type in self.thermal_gen_types:
self._scale_gencost_by_capacity(plant_id, factor)
def _scale_gencost_by_zone(self, gen_type):
"""Scales cost of generators, by zone.
:param str gen_type: type of generator.
"""
cost_key = f"{gen_type}_cost"
if "zone_id" in self.ct[cost_key].keys():
for zone_id, factor in self.ct[cost_key]["zone_id"].items():
plant_id = (
self.grid.plant.groupby(["zone_id", "type"])
.get_group((zone_id, gen_type))
.index.tolist()
)
self.grid.gencost["before"].loc[plant_id, ["c0", "c1", "c2"]] *= factor
def _scale_gencost_by_id(self, gen_type):
"""Scales cost of generators, by ID.
:param str gen_type: type of generator.
"""
cost_key = f"{gen_type}_cost"
if "plant_id" in self.ct[cost_key].keys():
for plant_id, factor in self.ct[cost_key]["plant_id"].items():
self.grid.gencost["before"].loc[plant_id, ["c0", "c1", "c2"]] *= factor
def _scale_gen_pmin_by_zone(self, gen_type):
"""Scales minimum generation of generators, by zone.
:param str gen_type: type of generator.
"""
pmin_key = f"{gen_type}_pmin"
if "zone_id" in self.ct[pmin_key].keys():
for zone_id, factor in self.ct[pmin_key]["zone_id"].items():
plant_id = (
self.grid.plant.groupby(["zone_id", "type"])
.get_group((zone_id, gen_type))
.index.tolist()
)
self.grid.plant.loc[plant_id, "Pmin"] *= factor
def _scale_gen_pmin_by_id(self, gen_type):
"""Scales minimum generation of generators, by ID.
:param str gen_type: type of generator.
"""
pmin_key = f"{gen_type}_pmin"
if "plant_id" in self.ct[pmin_key].keys():
for plant_id, factor in self.ct[pmin_key]["plant_id"].items():
self.grid.plant.loc[plant_id, "Pmin"] *= factor
def _scale_gen_capacity(self, plant_id, factor):
"""Scales capacity of plants.
:param int/list plant_id: plant identification number(s).
:param float factor: scaling factor.
"""
self.grid.plant.loc[plant_id, "Pmax"] *= factor
self.grid.plant.loc[plant_id, "Pmin"] *= factor
def _scale_gencost_by_capacity(self, plant_id, factor):
"""Scales generation cost curves along with capacity, such that the start/end
slopes are consistent before and after.
:param int/list plant_id: plant identification number(s).
:param float factor: scaling factor.
:return:
"""
self.grid.gencost["before"].loc[plant_id, "c0"] *= factor
if factor != 0:
self.grid.gencost["before"].loc[plant_id, "c2"] /= factor
def _scale_branch_by_zone(self):
"""Scales capacity of AC lines, by zone, for lines entirely within that zone."""
if "zone_id" in self.ct["branch"].keys():
for zone_id, factor in self.ct["branch"]["zone_id"].items():
branch_id = (
self.grid.branch.groupby(["from_zone_id", "to_zone_id"])
.get_group((zone_id, zone_id))
.index.tolist()
)
self._scale_branch_capacity(branch_id, factor)
def _scale_branch_by_id(self):
"""Scales capacity of AC lines, by ID."""
if "branch_id" in self.ct["branch"].keys():
for branch_id, factor in self.ct["branch"]["branch_id"].items():
self._scale_branch_capacity(branch_id, factor)
def _scale_branch_capacity(self, branch_id, factor):
"""Scales capacity of AC lines.
:param int/list branch_id: branch identification number(s)
:param float factor: scaling factor
"""
self.grid.branch.loc[branch_id, "rateA"] *= factor
self.grid.branch.loc[branch_id, "x"] /= factor
def _scale_dcline(self):
"""Scales capacity of HVDC lines."""
for dcline_id, factor in self.ct["dcline"]["dcline_id"].items():
self.grid.dcline.loc[dcline_id, "Pmin"] *= factor
self.grid.dcline.loc[dcline_id, "Pmax"] *= factor
if factor == 0:
self.grid.dcline.loc[dcline_id, "status"] = 0
def _add_branch(self):
"""Adds branch(es) to the grid."""
v2x = voltage_to_x_per_distance(self.grid)
for entry in self.ct["new_branch"]:
new_branch = {c: 0 for c in self.grid.branch.columns}
from_bus_id = entry["from_bus_id"]
to_bus_id = entry["to_bus_id"]
interconnect = self.grid.bus.loc[from_bus_id].interconnect
from_zone_id = self.grid.bus.loc[from_bus_id].zone_id
to_zone_id = self.grid.bus.loc[to_bus_id].zone_id
from_zone_name = self.grid.id2zone[from_zone_id]
to_zone_name = self.grid.id2zone[to_zone_id]
from_lon = self.grid.bus.loc[from_bus_id].lon
from_lat = self.grid.bus.loc[from_bus_id].lat
to_lon = self.grid.bus.loc[to_bus_id].lon
to_lat = self.grid.bus.loc[to_bus_id].lat
from_basekv = v2x[self.grid.bus.loc[from_bus_id].baseKV]
to_basekv = v2x[self.grid.bus.loc[to_bus_id].baseKV]
distance = haversine((from_lat, from_lon), (to_lat, to_lon))
x = distance * np.mean([from_basekv, to_basekv])
new_branch["from_bus_id"] = entry["from_bus_id"]
new_branch["to_bus_id"] = entry["to_bus_id"]
new_branch["status"] = 1
new_branch["ratio"] = 0
new_branch["branch_device_type"] = "Line"
new_branch["rateA"] = entry["Pmax"]
new_branch["interconnect"] = interconnect
new_branch["from_zone_id"] = from_zone_id
new_branch["to_zone_id"] = to_zone_id
new_branch["from_zone_name"] = from_zone_name
new_branch["to_zone_name"] = to_zone_name
new_branch["from_lon"] = from_lon
new_branch["from_lat"] = from_lat
new_branch["to_lon"] = to_lon
new_branch["to_lat"] = to_lat
new_branch["x"] = x
new_index = [self.grid.branch.index[-1] + 1]
self.grid.branch = self.grid.branch.append(
                pd.DataFrame(new_branch, index=new_index)
#%%
import ee
from ee.data import exportTable
import eemont
import re
from datetime import datetime
import pandas as pd
import numpy as np
from pandas.core import frame
import geopandas as gpd
import matplotlib.pyplot as plt
import dload
from py01_helper_functions import ee_collection_pull, process_gdf
# %%
ee.Authenticate()
ee.Initialize()
# %%
sa_cereals_class = gpd.read_file('sa-cereals/sa_cereals_class.shp')
#%% DEFINE COLLECTION
# Landsat Spectral Indicies Collections
l5_EVI = "LANDSAT/LT05/C01/T1_8DAY_EVI"
l5_NDVI = "LANDSAT/LT05/C01/T1_8DAY_NDVI"
l5_NDSI = "LANDSAT/LT05/C01/T1_8DAY_NDSI"
l5_NBR = "LANDSAT/LT05/C01/T1_8DAY_NBRT"
l7_EVI = "LANDSAT/LE07/C01/T1_8DAY_EVI"
l7_NDVI = "LANDSAT/LE07/C01/T1_8DAY_NDVI"
l7_NDSI = "LANDSAT/LE07/C01/T1_8DAY_NDSI"
l7_NBR = "LANDSAT/LE07/C01/T1_8DAY_NBRT"
# Initial date of interest (inclusive).
l5_start_date = '1989-01-01'
# Final date of interest (exclusive).
l5_end_date = '1999-12-31'
# Initial date of interest (inclusive).
l7_start_date = '2020-01-01'
# Final date of interest (exclusive).
l7_end_date = '2021-06-30'
#%%
####################################
# EVI COLLECTIONS #
####################################
try:
sa_all_polygons_evi = pd.read_csv('sa_all_polygons_evi.csv')
except:
l5_evi_collection = ee.ImageCollection(l5_EVI)\
.filterDate(l5_start_date, l5_end_date)\
.maskClouds()\
.preprocess()
l7_evi_collection = ee.ImageCollection(l7_EVI)\
.filterDate(l7_start_date, l7_end_date)\
.maskClouds()\
.preprocess()
landsat_5_evi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l5_evi_collection,
index = 'EVI')
landsat_7_evi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l7_evi_collection,
index = 'EVI')
sa_all_polygons_evi = pd.concat([landsat_5_evi, landsat_7_evi])
sa_all_polygons_evi.to_csv('sa_all_polygons_evi.csv')
sa_all_polygons_evi = pd.read_csv('sa_all_polygons_evi.csv')
# %%
####################################
# NDVI COLLECTIONS #
####################################
try:
sa_all_polygons_ndvi = pd.read_csv('sa_all_polygons_ndvi.csv')
except:
l5_ndvi_collection = ee.ImageCollection(l5_NDVI)\
.filterDate(l5_start_date, l5_end_date)\
.maskClouds()\
.preprocess()
l7_ndvi_collection = ee.ImageCollection(l7_NDVI)\
.filterDate(l7_start_date, l7_end_date)\
.maskClouds()\
.preprocess()
landsat_5_ndvi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l5_ndvi_collection,
index = 'NDVI')
landsat_7_ndvi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l7_ndvi_collection,
index = 'NDVI')
sa_all_polygons_ndvi = pd.concat([landsat_5_ndvi, landsat_7_ndvi])
sa_all_polygons_ndvi.to_csv('sa_all_polygons_ndvi.csv')
sa_all_polygons_ndvi = pd.read_csv('sa_all_polygons_ndvi.csv')
# %%
####################################
# NDSI COLLECTIONS #
####################################
try:
    sa_all_polygons_ndsi = pd.read_csv('sa_all_polygons_ndsi.csv')
from sys import argv
from Bio import SeqIO
import pandas as pd
import re
script, strain_name = argv
table1_df = pd.read_csv('%s_table1.csv' % strain_name, sep='\t')
table1_df['product'].fillna('None', inplace=True)
subcluster_df = pd.read_csv('subcluster_dictionary.csv', sep='\t')
subcluster_dict = subcluster_df.set_index('product')['category'].to_dict()
col7 = []
col8 = []
missing_from_dict = []
table1_df['product'] = table1_df['product'].astype(str)
for line in table1_df['product']:
for key in subcluster_dict:
m = re.search(key, line, re.I)
if line != None and m != None:
col7.append(subcluster_dict[key])
col8.append(line)
frames = {'category':col7,'product':col8}
new_cols_df = pd.DataFrame(frames, index=None)
table1_df = pd.merge(table1_df, new_cols_df, on='product', how='outer')
from sklearn import svm
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
import pandas as pd
import utilities
# Load input data
input_file = 'data_multivar.txt'
X, y = utilities.load_data(input_file)
###############################################
# Train test split
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=5)
# Set the parameters by cross-validation
parameter_grid = {"C": [1, 10, 50, 600],
'kernel':['linear','poly','rbf'],
"gamma": [0.01, 0.001],
'degree': [2, 3]}
metrics = ['precision']
for metric in metrics:
print("#### Grid Searching optimal hyperparameters for", metric)
classifier = GridSearchCV(svm.SVC(C=1),
parameter_grid, cv=5,scoring=metric,return_train_score=True)
classifier.fit(X_train, y_train)
print("Scores across the parameter grid:")
GridSCVResults = pd.DataFrame(classifier.cv_results_)
for i in range(0,len(GridSCVResults)):
print(GridSCVResults.params[i], '-->', round(GridSCVResults.mean_test_score[i],3))
print("Highest scoring parameter set:", classifier.best_params_)
y_true, y_pred = y_test, classifier.predict(X_test)
print("Full performance report:\n")
print(classification_report(y_true, y_pred))
# Perform a randomized search on hyper parameters
from sklearn.model_selection import RandomizedSearchCV
parameter_rand = {"C": [1, 10, 50, 600],
'kernel':['linear','poly','rbf'],
"gamma": [0.01, 0.001],
'degree': [2, 3]}
metrics = ['precision']
for metric in metrics:
print("#### Randomized Searching optimal hyperparameters for", metric)
classifier = RandomizedSearchCV(svm.SVC(C=1),
param_distributions=parameter_rand,n_iter=30, cv=5,return_train_score=True)
classifier.fit(X_train, y_train)
print("Scores across the parameter grid:")
RandSCVResults = | pd.DataFrame(classifier.cv_results_) | pandas.DataFrame |
from pathlib import Path
import copy
import pickle as pkl
from mmap import mmap
from scipy import stats as st
from scipy.stats._continuous_distns import FitDataError
import torch
from sklearn import svm
from sklearn import linear_model
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import os
import matplotlib.colors as mcolors
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.mplot3d.art3d import juggle_axes
from matplotlib.ticker import MaxNLocator
from joblib import Memory
import math
import lyap
import model_loader_utils as loader
import initialize_and_train as train
import utils
memory = Memory(location='./memoization_cache', verbose=2)
# memory.clear()
## Functions for computing means and error bars for the plots. 68% confidence
# intervals and means are currently
# implemented in this code. The commented out code is for using a gamma
# distribution to compute these, but uses a
# custom version of seaborn plotting library to plot.
def orth_proj(v):
n = len(v)
vv = v.reshape(-1, 1)
return torch.eye(n) - ([email protected])/(v@v)
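# Small illustrative sanity check for orth_proj (not called anywhere else): the
# returned projector should annihilate v and leave vectors orthogonal to v unchanged.
def _orth_proj_sanity_check():
    v = torch.tensor([3., 4.])
    P = orth_proj(v)
    assert torch.allclose(P@v, torch.zeros(2), atol=1e-6)  # component along v removed
    w = torch.tensor([-4., 3.])                            # orthogonal to v
    assert torch.allclose(P@w, w, atol=1e-6)               # orthogonal part preserved
    return P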
USE_ERRORBARS = True
# USE_ERRORBARS = False
LEGEND = False
# LEGEND = True
folder_root = '../results/figs/'
def ci_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return bounds[1], bounds[0]
# ci_acc = 68
# ci_acc = 95
def est_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return median
# est_acc = "mean"
def ci_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return bounds[1], bounds[0]
# ci_dim = 68
# ci_dim = 95
def est_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return median
# est_dim = "mean"
def point_replace(a_string):
a_string = str(a_string)
return a_string.replace(".", "p")
def get_color(x, cmap=plt.cm.plasma):
"""Get normalized color assignments based on input data x and colormap
cmap."""
mag = torch.max(x) - torch.min(x)
x_norm = (x.float() - torch.min(x))/mag
return cmap(x_norm)
def median_and_bound(samples, perc_bound, dist_type='gamma', loc=0., shift=0,
reflect=False):
"""Get median and probability mass intervals for a gamma distribution fit
of samples."""
samples = np.array(samples)
def do_reflect(x, center):
return -1*(x - center) + center
if dist_type == 'gamma':
if np.sum(samples[0] == samples) == len(samples):
median = samples[0]
interval = [samples[0], samples[0]]
return median, interval
if reflect:
samples_reflected = do_reflect(samples, loc)
shape_ps, loc_fit, scale = st.gamma.fit(samples_reflected,
floc=loc + shift)
median_reflected = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval_reflected = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
median = do_reflect(median_reflected, loc)
interval = do_reflect(interval_reflected, loc)
else:
shape_ps, loc, scale = st.gamma.fit(samples, floc=loc + shift)
median = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
else:
raise ValueError("Distribution option (dist_type) not recognized.")
return median, interval
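# Illustrative usage of median_and_bound (a sketch mirroring how ci_acc/est_acc above
# call it): accuracy-like samples just below 1 are reflected about loc=1 before the
# gamma fit, and the median is returned together with a 75% probability-mass interval.
def _median_and_bound_demo(seed=0):
    rng = np.random.default_rng(seed)
    samples = 1 - rng.gamma(2., 0.01, size=200)   # synthetic accuracies close to 1
    median, interval = median_and_bound(samples, perc_bound=0.75, loc=1.,
                                        shift=-.0001, reflect=True)
    return median, interval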
## Set parameters for figure aesthetics
plt.rcParams['font.size'] = 6
plt.rcParams['font.size'] = 6
plt.rcParams['lines.markersize'] = 1
plt.rcParams['lines.linewidth'] = 1
plt.rcParams['axes.labelsize'] = 7
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.titlesize'] = 8
# Colormaps
class_style = 'color'
cols11 = np.array([90, 100, 170])/255
cols12 = np.array([37, 50, 120])/255
cols21 = np.array([250, 171, 62])/255
cols22 = np.array([156, 110, 35])/255
cmap_activation_pnts = mcolors.ListedColormap([cols11, cols21])
cmap_activation_pnts_edge = mcolors.ListedColormap([cols12, cols22])
rasterized = False
dpi = 800
ext = 'pdf'
# Default figure size
figsize = (1.5, 1.2)
ax_pos = (0, 0, 1, 1)
def make_fig(figsize=figsize, ax_pos=ax_pos):
"""Create figure."""
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(ax_pos)
return fig, ax
def out_fig(fig, figname, subfolder='', show=False, save=True, axis_type=0,
name_order=0, data=None):
""" Save figure."""
folder = Path(folder_root)
figname = point_replace(figname)
# os.makedirs('../results/figs/', exist_ok=True)
os.makedirs(folder, exist_ok=True)
ax = fig.axes[0]
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_rasterized(rasterized)
if axis_type == 1:
ax.tick_params(axis='both', which='both',
# both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
left=False, top=False,
# ticks along the top edge are off
labelbottom=False,
labelleft=False) # labels along the bottom edge are off
elif axis_type == 2:
ax.axis('off')
if name_order == 0:
fig_path = folder/subfolder/figname
else:
fig_path = folder/subfolder/figname
if save:
os.makedirs(folder/subfolder, exist_ok=True)
fig_file = fig_path.with_suffix('.' + ext)
print(f"Saving figure to {fig_file}")
fig.savefig(fig_file, dpi=dpi, transparent=True, bbox_inches='tight')
if show:
fig.tight_layout()
fig.show()
if data is not None:
os.makedirs(folder/subfolder/'data/', exist_ok=True)
with open(folder/subfolder/'data/{}_data'.format(figname),
'wb') as fid:
pkl.dump(data, fid, protocol=4)
plt.close('all')
def autocorrelation(train_params, figname='autocorrelation'):
train_params_loc = train_params.copy()
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
# val_loss = params['history']['losses']['val']
# val_losses[i0, i1] = val_loss
# val_acc = params['history']['accuracies']['val']
# val_accs[i0, i1] = val_acc
train_samples_per_epoch = len(class_datasets['train'])
class_datasets['train'].max_samples = 10
torch.manual_seed(params['model_seed'])
X = class_datasets['train'][:][0]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif train_params_loc['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# X = utils.extend_input(X, 10)
loader.load_model_from_epoch_and_dir(model, run_dir, -1)
hid = []
hid += model.get_post_activations(X)[:-1]
# auto_corr_mean = []
# auto_corr_var = []
auto_corr_table = pd.DataFrame(columns=['t_next', 'autocorr'])
h = hid[0]
for i0 in range(len(hid)):
h_next = hid[i0]
overlap = torch.sum(h*h_next, dim=1)
norms_h = torch.sqrt(torch.sum(h**2, dim=1))
norms_h_next = torch.sqrt(torch.sum(h_next**2, dim=1))
corrs = overlap/(norms_h*norms_h_next)
avg_corr = torch.mean(corrs)
d = {'t_next': i0, 'autocorr': corrs}
auto_corr_table = auto_corr_table.append(pd.DataFrame(d),
ignore_index=True)
fig, ax = make_fig(figsize)
sns.lineplot(ax=ax, x='t_next', y='autocorr', data=auto_corr_table)
out_fig(fig, figname)
def snapshots_through_time(train_params, figname="snap", subdir="snaps"):
"""
Plot PCA snapshots of the representation through time.
Parameters
----------
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training.
"""
subdir = Path(subdir)
X_dim = train_params['X_dim']
FEEDFORWARD = train_params['network'] == 'feedforward'
num_pnts_dim_red = 800
num_plot = 600
train_params_loc = copy.deepcopy(train_params)
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(train_params_loc['model_seed'])
X, Y = class_datasets['train'][:]
if FEEDFORWARD:
T = 10
y = Y
X0 = X
else:
T = 30
# T = 100
X = utils.extend_input(X, T + 2)
X0 = X[:, 0]
y = Y[:, -1]
loader.load_model_from_epoch_and_dir(model, run_dir, 0, 0)
hid_0 = [X0]
hid_0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
train_params_loc['num_epochs'], 0)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if FEEDFORWARD:
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
# r0_n = r[0] / torch.norm(r[0])
# r1_n = r[1] / torch.norm(r[1])
#
# r0_n_v = r0_n.reshape(r0_n.shape[0], 1)
# r1_n_v = r1_n.reshape(r1_n.shape[0], 1)
# r0_orth = torch.eye(len(r0_n)) - r0_n_v @ r0_n_v.T
# r1_orth = torch.eye(len(r1_n)) - r1_n_v @ r1_n_v.T
# h = hid[10]
# # h_proj = h @ r_orth
# u, s, v = torch.svd(h)
# v0 = v[:, 0]
# def orth_projector(v):
# n = len(v)
# return (torch.eye(n) - v.reshape(n, 1)@v.reshape(1, n))/(v@v)
# v0_orth = (torch.eye(n) - v0.reshape(n,1)@v0.reshape(1,n))/(v0@v0)
# h_v0_orth = h @ v0_orth
# r0_e_p = orth_projector(r0_e)
# r1_e_p = orth_projector(r1_e)
# h_r0_e_p0 = h[y] @ r0_e_p
# h_r0_e_p1 = h[y] @ r1_e_p
coloring = get_color(y, cmap_activation_pnts)[:num_plot]
edge_coloring = get_color(y, cmap_activation_pnts_edge)[:num_plot]
## Now get principal components (pcs) and align them from time point to
# time point
pcs = []
p_track = 0
norm = np.linalg.norm
projs = []
for i1 in range(1, len(hid)):
# pc = utils.get_pcs_covariance(hid[i1], [0, 1])
out = utils.get_pcs_covariance(hid[i1], [0, 1], return_extra=True)
pc = out['pca_projection']
mu = out['mean']
proj = out['pca_projectors']
mu_proj = mu@proj[:, :2]
if i1 > 0:
# Check for the best alignment
pc_flip_x = pc.clone()
pc_flip_x[:, 0] = -pc_flip_x[:, 0]
pc_flip_y = pc.clone()
pc_flip_y[:, 1] = -pc_flip_y[:, 1]
pc_flip_both = pc.clone()
pc_flip_both[:, 0] = -pc_flip_both[:, 0]
pc_flip_both[:, 1] = -pc_flip_both[:, 1]
difference0 = norm(p_track - pc)
difference1 = norm(p_track - pc_flip_x)
difference2 = norm(p_track - pc_flip_y)
difference3 = norm(p_track - pc_flip_both)
amin = np.argmin(
[difference0, difference1, difference2, difference3])
if amin == 1:
pc[:, 0] = -pc[:, 0]
proj[:, 0] = -proj[:, 0]
elif amin == 2:
pc[:, 1] = -pc[:, 1]
proj[:, 1] = -proj[:, 1]
elif amin == 3:
pc[:, 0] = -pc[:, 0]
pc[:, 1] = -pc[:, 1]
proj[:, 0] = -proj[:, 0]
proj[:, 1] = -proj[:, 1]
pc = pc + mu_proj
p_track = pc.clone()
pcs.append(pc[:num_plot])
projs.append(proj)
def take_snap(i0, scats, fig, dim=2, border=False):
# ax = fig.axes[0]
hid_pcs_plot = pcs[i0][:, :dim].numpy()
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
xc = (xm + xM)/2
yc = (ym + yM)/2
hid_pcs_plot[:, 0] = hid_pcs_plot[:, 0] - xc
hid_pcs_plot[:, 1] = hid_pcs_plot[:, 1] - yc
v = projs[i0]
# u, s, v = torch.svd(h)
if r.shape[0] == 2:
r0_p = r[0]@v
r1_p = r[1]@v
else:
r0_p = r.flatten()@v
r1_p = -r.flatten()@v
if class_style == 'shape':
scats[0][0].set_offsets(hid_pcs_plot)
else:
if dim == 3:
                scat._offsets3d = juggle_axes(*hid_pcs_plot[:, :dim].T, 'z')
else:
scats[0].set_offsets(hid_pcs_plot)
scats[1].set_offsets(r0_p[:2].reshape(1, 2))
scats[2].set_offsets(r1_p[:2].reshape(1, 2))
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
max_extent = max(xM - xm, yM - ym)
max_extent_arg = xM - xm > yM - ym
if dim == 2:
x_factor = .4
if max_extent_arg:
ax.set_xlim(
[xm - x_factor*max_extent, xM + x_factor*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim(
[ym - x_factor*max_extent, yM + x_factor*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
else:
if max_extent_arg:
ax.set_xlim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_zlim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_zlim([ym - .1*max_extent, yM + .1*max_extent])
# ax.plot([r0_p[0]], [r0_p[1]], 'x', markersize=3, color='black')
# ax.plot([r1_p[0]], [r1_p[1]], 'x', markersize=3, color='black')
ax.set_ylim([-4, 4])
if dim == 3:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
else:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
return scats,
dim = 2
hid_pcs_plot = pcs[0]
if dim == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
ax.set_zlim([-10, 10])
else:
fig, ax = make_fig()
ax.grid(False)
scat1 = ax.scatter(*hid_pcs_plot[:num_plot, :dim].T, c=coloring,
edgecolors=edge_coloring, s=10, linewidths=.65)
ax.plot([0], [0], 'x', markersize=7)
scat2 = ax.scatter([0], [0], marker='x', s=3, c='black')
scat3 = ax.scatter([0], [0], marker='x', s=3, color='black')
scats = [scat1, scat2, scat3]
# ax.plot([0], [0], 'o', markersize=10)
if FEEDFORWARD:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
else:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 21, 26,
31]) # snap_idx = list(range(T + 1))
for i0 in snap_idx:
take_snap(i0, scats, fig, dim=dim, border=False)
def _cluster_holdout_test_acc_stat_fun(h, y, clust_identity,
classifier_type='logistic_regression',
num_repeats=5, train_ratio=0.8, seed=11):
np.random.seed(seed)
num_clusts = np.max(clust_identity) + 1
num_clusts_train = int(round(num_clusts*train_ratio))
num_samples = h.shape[0]
test_accs = np.zeros(num_repeats)
train_accs = np.zeros(num_repeats)
for i0 in range(num_repeats):
permutation = np.random.permutation(np.arange(len(clust_identity)))
perm_inv = np.argsort(permutation)
clust_identity_shuffled = clust_identity[permutation]
train_idx = clust_identity_shuffled <= num_clusts_train
test_idx = clust_identity_shuffled > num_clusts_train
hid_train = h[train_idx[perm_inv]]
y_train = y[train_idx[perm_inv]]
y_test = y[test_idx[perm_inv]]
hid_test = h[test_idx[perm_inv]]
if classifier_type == 'svm':
classifier = svm.LinearSVC(random_state=3*i0 + 1)
else:
classifier = linear_model.LogisticRegression(random_state=3*i0 + 1,
solver='lbfgs')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classifier.fit(hid_train, y_train)
train_accs[i0] = classifier.score(hid_train, y_train)
test_accs[i0] = classifier.score(hid_test, y_test)
return train_accs, test_accs
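# Minimal synthetic usage sketch for the statistic above (illustrative only): ten
# Gaussian clusters in 20 dimensions with class labels assigned by cluster parity;
# the linear readout is trained on a subset of clusters and tested on the held-out
# clusters, so with random cluster centers the held-out accuracy should hover near
# chance.
def _cluster_holdout_demo(seed=0):
    rng = np.random.default_rng(seed)
    num_clusts, pnts_per_clust, dim = 10, 50, 20
    centers = 3*rng.standard_normal((num_clusts, dim))
    h = np.concatenate([c + rng.standard_normal((pnts_per_clust, dim))
                        for c in centers])
    clust_identity = np.repeat(np.arange(num_clusts), pnts_per_clust)
    y = clust_identity % 2                      # class label alternates over clusters
    train_accs, test_accs = _cluster_holdout_test_acc_stat_fun(h, y, clust_identity)
    return train_accs.mean(), test_accs.mean()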
def clust_holdout_over_layers(seeds, gs, train_params,
figname="clust_holdout_over_layers"):
"""
Logistic regression training and testing error on the representation
through the layers. Compares networks trained
with different choices of g_radius (specified by input parameter gs).
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
"""
if not hasattr(gs, '__len__'):
gs = [gs]
layer_label = 'layer'
@memory.cache
def generate_data_table_clust(seeds, gs, train_params):
layer_label = 'layer'
clust_acc_table = pd.DataFrame(
columns=['seed', 'g_radius', 'training', layer_label, 'LR training',
'LR testing'])
train_params_loc = copy.deepcopy(train_params)
for i0, seed in enumerate(seeds):
for i1, g in enumerate(gs):
train_params_loc['g_radius'] = g
train_params_loc['model_seed'] = seed
num_pnts_dim_red = 500
model, params, run_dir = train.initialize_and_train(
**train_params_loc)
class_datasets = params['datasets']
num_train_samples = len(class_datasets['train'])
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(params['model_seed'])
X, Y = class_datasets['train'][:]
if train_params_loc['network'] == 'feedforward':
X0 = X
else:
X0 = X[:, 0]
for epoch, epoch_label in zip([0, -1], ['before', 'after']):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if len(Y.shape) > 1:
Y = Y[:, -1]
cluster_identity = class_datasets['train'].cluster_identity
ds = []
for lay, h in enumerate(hid):
stat = _cluster_holdout_test_acc_stat_fun(h.numpy(),
Y.numpy(),
cluster_identity)
ds.extend([{
'seed': seed, 'g_radius': g,
'training': epoch_label, layer_label: lay,
'LR training': stat[0][k], 'LR testing': stat[1][k]
} for k in range(len(stat[0]))])
clust_acc_table = clust_acc_table.append(pd.DataFrame(ds),
ignore_index=True)
clust_acc_table['seed'] = clust_acc_table['seed'].astype('category')
clust_acc_table['g_radius'] = clust_acc_table['g_radius'].astype(
'category')
clust_acc_table['training'] = clust_acc_table['training'].astype(
'category')
return clust_acc_table
clust_acc_table = generate_data_table_clust(seeds, gs, train_params)
layers = set(clust_acc_table[layer_label])
for stage in ['LR training', 'LR testing']:
if stage == 'LR training':
clust_acc_table_stage = clust_acc_table.drop(columns=['LR testing'])
else:
clust_acc_table_stage = clust_acc_table.drop(
columns=['LR training'])
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator=est_acc,
ci=ci_acc, style='training',
style_order=['after', 'before'], hue='g_radius')
else:
g1 = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator=None,
units='seed', style='training',
style_order=['after', 'before'], hue='g_radius',
alpha=0.6)
g2 = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator='mean',
ci=None, style='training',
style_order=['after', 'before'], hue='g_radius')
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
ax.set_ylim([-.01, 1.01])
ax.set_xticks(range(len(layers)))
out_fig(fig, figname + '_' + stage, subfolder=train_params[
'network'] +
'/clust_holdout_over_layers/',
show=False, save=True, axis_type=0, name_order=0,
data=clust_acc_table)
plt.close('all')
def get_stats(stat_fun, train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, *args, **kwargs):
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
style_bool = train_params_list_style is not None
if style_bool and style_key is None:
raise ValueError("Please specify a style_key.")
hue_bool = len(train_params_list_hue) > 1
if hue_bool and hue_key is None:
raise ValueError("Please specify a hue_key.")
if seeds is None:
seeds = [train_params_list_hue[0]['model_seed']]
params_cat = [[], []]
params_cat[0] = train_params_list_hue
if style_bool:
params_cat[1] = train_params_list_style
else:
params_cat[1] = [None]
table = pd.DataFrame()
    if hue_bool:
        table = table.reindex(columns=table.columns.tolist() + [hue_key])
    if style_bool:
        table = table.reindex(columns=table.columns.tolist() + [style_key])
    for i0 in range(len(params_cat)):  # 0: hue parameter sets, 1: style parameter sets
for i1 in range(len(params_cat[i0])):
params = params_cat[i0][i1]
table_piece = stat_fun(params, hue_key, style_key, seeds,
*args, **kwargs)
table = table.append(table_piece, ignore_index=True)
if hue_key is not None:
table[hue_key] = table[hue_key].astype('category')
if style_key is not None:
table[style_key] = table[style_key].astype('category')
return table
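# Illustrative use of get_stats (the parameter values below are hypothetical):
# stat_fun is called as stat_fun(params, hue_key, style_key, seeds) once per
# entry of each parameter list and the returned DataFrame pieces are stacked,
# e.g.
#   table = get_stats(compute_dim_through_training,
#                     train_params_list_hue=[{**base, 'g_radius': g}
#                                            for g in (1, 20)],
#                     seeds=[0, 1, 2], hue_key='g_radius')
# The hue/style columns are then cast to 'category' so seaborn treats them as
# discrete groupings when plotting.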
def dim_through_training(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, figname='',
subdir=None, multiprocess_lock=None):
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/' + 'dim_over_training' + '/'
@memory.cache
def compute_dim_through_training(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
for i_epoch, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
try:
dim = utils.get_effdim(hid[-1],
preserve_gradients=False).item()
except RuntimeError:
print("Dim computation didn't converge.")
dim = np.nan
num_updates = int(
params['num_train_samples_per_epoch']/params[
'batch_size'])*epoch
d = {
'effective_dimension': dim, 'seed': seed,
'epoch_index': i_epoch, 'epoch': epoch,
'num_updates': num_updates
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_through_training, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim, hue=hue_key,
style=style_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=None, units='seed', hue=hue_key,
style=style_key, alpha=.6)
g2 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator='mean', ci=None, hue=hue_key,
style=style_key)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax.set_ylim([0, None])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
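# Hedged sketch: utils.get_effdim is defined outside this file, so its exact
# definition is not shown here. The stand-in below is an assumption about what
# it computes, namely the participation-ratio dimension
# PR = (sum_i lam_i)^2 / sum_i lam_i^2 of the activation covariance spectrum,
# which is a common notion of "effective dimension". It is an illustration
# only, not the project's implementation, and relies solely on numpy.
def _effective_dim_sketch(h):
    """Participation-ratio dimension of activations `h` (samples x units)."""
    h = np.asarray(h, dtype=float)
    h = h - h.mean(axis=0, keepdims=True)    # center the activations
    cov = h.T @ h / max(h.shape[0] - 1, 1)   # sample covariance matrix
    eigvals = np.clip(np.linalg.eigvalsh(cov), 0, None)  # covariance spectrum
    denom = np.sum(eigvals ** 2)
    return float(np.sum(eigvals) ** 2 / denom) if denom > 0 else float('nan')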
def dim_over_layers(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="dim_over_layers", subdir=None, T=0,
multiprocess_lock=None, use_error_bars=None, **plot_kwargs):
"""
    Effective dimension measured over layers (or timepoints if looking at an
    RNN) of the network after training.
    Parameters
    ----------
    train_params_list_hue : List[dict]
        Training-parameter dictionaries whose variation is shown via hue.
    train_params_list_style : Optional[List[dict]]
        Optional training-parameter dictionaries whose variation is shown via
        line style.
    seeds : List[int]
        List of random number seeds to use for generating instantiations of
        the model and dataset. Variation over these seeds is used to plot
        error bars.
    hue_key : Optional[str]
        Key into the parameter dictionaries used for the hue grouping.
    style_key : Optional[str]
        Key into the parameter dictionaries used for the style grouping.
    figname : str
        Name of the figure to save.
    T : int
        Final timepoint to plot (if looking at an RNN). If 0, disregard this
        parameter.
    """
if subdir is None:
subdir = train_params_list_hue[0]['network'] + '/dim_over_layers/'
if use_error_bars is None:
use_error_bars = USE_ERRORBARS
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_dim_over_layers(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
            T = 15  # hardcoded override; the function-level T argument is not forwarded into this cached helper
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir,
params['num_epochs'])
hid = [X0]
hid += model.get_post_activations(X)[:-1]
dims = []
for h in hid:
try:
dims.append(utils.get_effdim(h,
preserve_gradients=False).item())
except RuntimeError:
dims.append(np.nan)
d = {
'effective_dimension': dims,
'layer': list(range(len(dims))), 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_over_layers, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# breakpoint()
# print(table)
fig, ax = make_fig((1.5, 1.2))
# table['g_radius'] = table['g_radius'].astype('float64')
# norm = plt.Normalize(table['g_radius'].min(), table['g_radius'].max())
# sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
# sm.set_array([])
# try:
if use_error_bars:
g = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim,
style=style_key, hue=hue_key, **plot_kwargs)
# g.figure.colorbar(sm)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6,
**plot_kwargs)
g2 = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key, **plot_kwargs)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
# except FitDataError:
# print("Plotting data invalid.")
layers = set(table['layer'])
if len(layers) < 12:
ax.set_xticks(range(len(layers)))
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(
integer=True)) # ax.xaxis.set_major_locator(plt.MaxNLocator(10))
# ax.set_ylim([0, None])
# ax.set_ylim([0, 15])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def orth_compression_through_layers(train_params_list_hue,
train_params_list_style=None, seeds=None,
hue_key=None, style_key=None,
figname="orth_compression_through_layers",
subdir=None, multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/orth_compression_through_layers/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_layers(params, hue_key, style_key,
seeds):
num_pnts = 500
# num_dims = 2
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
num_dims = int(returned_params['X_dim'])
class_datasets = returned_params['datasets']
def pca(v):
out = utils.get_pcs(v, list(range(num_dims)),
return_extra=True)
h_pcs = out['pca_projection']
v = out['pca_projectors'][:, :num_dims]
return h_pcs, v
class_datasets['train'].max_samples = num_pnts
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
# T = 20
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
r0s = []
r1s = []
for save in saves[-2][:]:
loader.load_model_from_epoch_and_dir(model, run_dir,
epochs[-1], save)
if params['network'] == 'feedforward':
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
r0s.append(r[0].double())
if params['loss'] != 'mse_scalar':
r1s.append(r[1].double())
r0 = torch.mean(torch.stack(r0s), dim=0)
if params['loss'] != 'mse_scalar':
r1 = torch.mean(torch.stack(r1s), dim=0)
if params['network'] == 'feedforward':
y = Y.flatten()
else:
y = Y[:, -1]
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir, 0)
hid0 = [X0]
hid0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
params['num_epochs'])
hid = [X0]
hid += model.get_post_activations(X)[:-1]
rs = []
avg_ratios = []
for i0, (h, h0) in enumerate(zip(hid, hid0)):
h = h.double()
h_pcs, v = pca(h)
h0 = h0.double()
h0_pcs, v0 = pca(h0)
if params['loss'] == 'mse_scalar':
h_proj = h_pcs@orth_proj(r0@v).T
h0_proj = h0_pcs@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio = torch.mean(ratios).item()
else:
h_proj = h_pcs[y == 0]@orth_proj(
r0@v).T # todo: maybe need to use yh (net
# prediction)
h0_proj = h0_pcs[y == 0]@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio1 = torch.mean(ratios).item()
h_proj = h_pcs[y == 1]@orth_proj(r1@v).T
h0_proj = h0_pcs[y == 1]@orth_proj(r1@v).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio2 = torch.mean(ratios).item()
avg_ratio = (avg_ratio1 + avg_ratio2)/2
avg_ratios.append(avg_ratio)
# u, s, v = torch.svd(h)
# proj_mags = [(h @ r_orth.T)]
# def get_shrink(r, h, h0):
d = {
'projections_magnitude': avg_ratios,
'layer': list(range(len(avg_ratios))), 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d),
ignore_index=True)
return table_piece
table = get_stats(compute_orth_compression_through_layers,
train_params_list_hue, train_params_list_style, seeds,
hue_key, style_key)
print(table)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
fig, ax = make_fig((1.5, 1.2))
try:
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='layer', y='projections_magnitude',
data=table, estimator='mean', ci=68,
style=style_key, hue=hue_key)
else:
g = sns.lineplot(ax=ax, x='layer', y='projections_magnitude',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
except FitDataError:
print("Invalid data.")
layers = set(table['layer'])
if len(layers) < 12:
ax.set_xticks(range(len(layers)))
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.set_ylim([-.05, None])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
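# Hedged sketch: orth_proj is defined elsewhere in this project. From its usage
# above (h_pcs @ orth_proj(r).T with a readout vector r), it is assumed to
# return the projector onto the orthogonal complement of r, i.e.
# P = I - r r^T / ||r||^2. The stand-in below illustrates that assumption and
# is not the project's implementation; it expects a 1-D torch tensor.
def _orth_complement_projector_sketch(r):
    """Return P such that x @ P.T removes the component of x along r."""
    r = r.flatten().double()
    eye = torch.eye(r.shape[0], dtype=r.dtype)
    outer = r.unsqueeze(1) @ r.unsqueeze(0)   # outer product r r^T
    return eye - outer / (r @ r)              # projector onto complement of r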
def orth_compression_through_training(train_params_list_hue,
train_params_list_style=None, seeds=None,
hue_key=None, style_key=None,
figname="orth_compression_through_training",
subdir=None, multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/orth_compression_through_training/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_training(params, hue_key, style_key,
seeds):
num_pnts = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
num_dims = int(returned_params['X_dim'])
class_datasets = returned_params['datasets']
def pca(v):
out = utils.get_pcs(v, list(range(num_dims)),
return_extra=True)
h_pcs = out['pca_projection']
v = out['pca_projectors'][:, :num_dims]
return h_pcs, v
class_datasets['train'].max_samples = num_pnts
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
if params['network'] == 'feedforward':
y = Y
else:
y = Y[:, -1]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir, 0)
# hid0 = [X0]
h0 = model.get_post_activations(X)[:-1][-1].double()
h0_pcs, v0 = pca(h0)
# avg_ratios = []
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
# saves = saves[params['num_epochs']-1]
for epoch_idx, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
h = model.get_post_activations(X)[:-1][-1].double()
r0s = []
r1s = []
for save in saves[-2][:]:
loader.load_model_from_epoch_and_dir(model, run_dir,
epoch, save)
if params['network'] == 'feedforward':
r = model.layer_weights[
-1].detach().clone().double().T
else:
r = model.Wout.detach().double().clone()
r0s.append(r[0].double())
if params['loss'] != 'mse_scalar':
r1s.append(r[1].double())
r0 = torch.mean(torch.stack(r0s), dim=0)
if params['loss'] != 'mse_scalar':
r1 = torch.mean(torch.stack(r1s), dim=0)
h_pcs, v = pca(h)
if params['loss'] == 'mse_scalar':
h_proj = h_pcs@orth_proj(r0@v).T
h0_proj = h0_pcs@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio = torch.mean(ratios).item()
else:
h_proj = h_pcs[y == 0]@orth_proj(
r0@v).T # todo: maybe need to use yh (net
# prediction)
h0_proj = h0_pcs[y == 0]@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio1 = torch.mean(ratios).item()
h_proj = h_pcs[y == 1]@orth_proj(r1@v).T
h0_proj = h0_pcs[y == 1]@orth_proj(r1@v).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio2 = torch.mean(ratios).item()
avg_ratio = (avg_ratio1 + avg_ratio2)/2
d = {
'projections_magnitude': avg_ratio, 'epoch': epoch,
'epoch_idx': epoch_idx, 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_orth_compression_through_training,
train_params_list_hue, train_params_list_style, seeds,
hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# print(table)
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=68, style=style_key,
hue=hue_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6)
g2 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.set_ylim([-0.05, None])
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def orth_compression_through_training_input_sep(train_params_list_hue,
train_params_list_style=None,
seeds=None, hue_key=None,
style_key=None,
figname="orth_compression_through_training_input_sep",
subdir=None,
multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + \
'/orth_compression_through_training_input_sep/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_training_input_sep(params, hue_key,
style_key, seeds):
num_pnts = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
num_dims = int(returned_params['X_dim'])
class_datasets = returned_params['datasets']
def pca(v):
out = utils.get_pcs(v, list(range(num_dims)),
return_extra=True)
h_pcs = out['pca_projection']
v = out['pca_projectors'][:, :num_dims]
return h_pcs, v
class_datasets['train'].max_samples = num_pnts
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
if params['network'] == 'feedforward':
y = Y
else:
y = Y[:, -1]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir, 0)
# hid0 = [X0]
h0 = model.get_post_activations(X)[:-1][-1].double()
h0_pcs, v0 = pca(h0)
# avg_ratios = []
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
# saves = saves[params['num_epochs']-1]
for epoch_idx, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
h = model.get_post_activations(X)[:-1][-1].double()
# h_pcs, v = pca(h)
# class_diff = torch.mean(h_pcs[y == 0], dim=0) -
# torch.mean(
# h_pcs[y == 1], dim=0)
class_diff = torch.mean(h[y == 0], dim=0) - torch.mean(
h[y == 1], dim=0)
h_proj = h@orth_proj(class_diff).T
h0_proj = h0@orth_proj(class_diff).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio = torch.mean(ratios).item()
# if params['loss'] == 'mse_scalar':
# # h_proj = h_pcs@orth_proj(class_diff@v).T
# # h0_proj = h0_pcs@orth_proj(class_diff@v).T
#
# else:
# h_proj = h_pcs[y == 0]@orth_proj(
# r0@v).T # todo: maybe need to use yh (net
# # prediction. Doesn't matter if net is perfectly )
# h0_proj = h0_pcs[y == 0]@orth_proj(r0@v0).T
# h_norms = torch.norm(h_proj, dim=1)
# h0_norms = torch.norm(h0_proj, dim=1)
# ratios = h_norms/h0_norms
# avg_ratio1 = torch.mean(ratios).item()
# h_proj = h_pcs[y == 1]@orth_proj(r1@v).T
# h0_proj = h0_pcs[y == 1]@orth_proj(r1@v).T
# h_norms = torch.norm(h_proj, dim=1)
# h0_norms = torch.norm(h0_proj, dim=1)
# ratios = h_norms/h0_norms
# avg_ratio2 = torch.mean(ratios).item()
#
# avg_ratio = (avg_ratio1 + avg_ratio2)/2
d = {
'projections_magnitude': avg_ratio, 'epoch': epoch,
'epoch_idx': epoch_idx, 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_orth_compression_through_training_input_sep,
train_params_list_hue, train_params_list_style, seeds,
hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# print(table)
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=68, style=style_key,
hue=hue_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6,
**plot_kwargs)
g2 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key, **plot_kwargs)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.set_ylim([-0.05, None])
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def clust_holdout_over_layers(train_params_list_hue,
train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="dim_over_layers", subdir=None, T=0,
multiprocess_lock=None, use_error_bars=None,
**plot_kwargs):
"""
    Cluster-holdout linear readout accuracy measured over layers (or timepoints
    if looking at an RNN) of the network after training: a linear classifier is
    fit with one input cluster held out at a time, and its train/test accuracy
    is plotted per layer.
    Parameters
    ----------
    train_params_list_hue : List[dict]
        Training-parameter dictionaries whose variation is shown via hue.
    train_params_list_style : Optional[List[dict]]
        Optional training-parameter dictionaries whose variation is shown via
        line style.
    seeds : List[int]
        List of random number seeds to use for generating instantiations of
        the model and dataset. Variation over these seeds is used to plot
        error bars.
    hue_key : Optional[str]
        Key into the parameter dictionaries used for the hue grouping.
    style_key : Optional[str]
        Key into the parameter dictionaries used for the style grouping.
    figname : str
        Name of the figure to save.
    T : int
        Final timepoint to plot (if looking at an RNN). If 0, disregard this
        parameter.
    """
    if subdir is None:
        subdir = train_params_list_hue[0][
                     'network'] + '/clust_holdout_over_layers/'
if use_error_bars is None:
use_error_bars = USE_ERRORBARS
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_clust_holdout_over_layers(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir,
params['num_epochs'])
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if len(Y.shape) > 1:
Y = Y[:, -1]
cluster_identity = class_datasets['train'].cluster_identity
for lay, h in enumerate(hid):
stat = _cluster_holdout_test_acc_stat_fun(h.numpy(),
Y.numpy(),
cluster_identity)
d = {
'LR training': np.mean(stat[0]),
'LR testing': np.mean(stat[1]),
'layer': lay, 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(
pd.DataFrame(d, index=[0]),
ignore_index=True)
# ds.extend([{
# 'seed': seed, 'g_radius': g,
# 'training': epoch_label, layer_label: lay,
# 'LR training': stat[0][k], 'LR testing': stat[1][k]
# } for k in range(len(stat[0]))])
return table_piece
table = get_stats(compute_clust_holdout_over_layers, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# breakpoint()
# print(table)
fig, ax = make_fig((1.5, 1.2))
# table['g_radius'] = table['g_radius'].astype('float64')
# norm = plt.Normalize(table['g_radius'].min(), table['g_radius'].max())
# sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
# sm.set_array([])
# try:
layers = set(table['layer'])
for stage in ['LR training', 'LR testing']:
if stage == 'LR training':
clust_acc_table_stage = table.drop(columns=['LR testing'])
else:
clust_acc_table_stage = table.drop(
columns=['LR training'])
fig, ax = make_fig((1.5, 1.2))
        if USE_ERRORBARS:
            g = sns.lineplot(ax=ax, x='layer', y=stage,
                             data=clust_acc_table_stage, estimator=est_acc,
                             ci=ci_acc, hue=hue_key, style=style_key,
                             **plot_kwargs)
            if not LEGEND and g.legend_ is not None:
                g.legend_.remove()
        else:
            g1 = sns.lineplot(ax=ax, x='layer', y=stage,
                              data=clust_acc_table_stage, estimator=None,
                              units='seed', hue=hue_key, style=style_key,
                              alpha=0.6, **plot_kwargs)
            g2 = sns.lineplot(ax=ax, x='layer', y=stage,
                              data=clust_acc_table_stage, estimator='mean',
                              ci=None, hue=hue_key, style=style_key,
                              **plot_kwargs)
            if g1.legend_ is not None:
                g1.legend_.remove()
            if not LEGEND and g2.legend_ is not None:
                g2.legend_.remove()
        ax.set_ylim([-.01, 1.01])
        if len(layers) < 12:
            ax.set_xticks(range(len(layers)))
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(
integer=True)) # ax.xaxis.set_major_locator(plt.MaxNLocator(
# 10))
# ax.set_ylim([0, None])
#
out_fig(fig, figname + '_' + stage, subfolder=subdir, show=False,
save=True, axis_type=0,
data=table)
#
# plt.close('all')
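# Hedged sketch: _cluster_holdout_test_acc_stat_fun is defined elsewhere in this
# module. From its usage (per-cluster train/test accuracy lists that get
# averaged above), it is assumed to fit a linear readout while holding out one
# input cluster at a time. The stand-in below illustrates that assumption with
# scikit-learn's LogisticRegression (assumed to be available); it is not the
# project's implementation.
def _cluster_holdout_stat_sketch(h, y, cluster_identity):
    from sklearn.linear_model import LogisticRegression
    train_accs, test_accs = [], []
    for c in np.unique(cluster_identity):
        held_out = cluster_identity == c         # samples of the held-out cluster
        clf = LogisticRegression(max_iter=1000)
        clf.fit(h[~held_out], y[~held_out])      # fit on the remaining clusters
        train_accs.append(clf.score(h[~held_out], y[~held_out]))
        test_accs.append(clf.score(h[held_out], y[held_out]))
    return train_accs, test_accs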
def acc_over_training(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="acc_over_training", subdir=None,
multiprocess_lock=None, **plot_kwargs):
"""
    Model accuracy tracked over the course of training.
    Parameters
    ----------
    train_params_list_hue : List[dict]
        Training-parameter dictionaries whose variation is shown via hue.
    train_params_list_style : Optional[List[dict]]
        Optional training-parameter dictionaries whose variation is shown via
        line style.
    seeds : List[int]
        List of random number seeds to use for generating instantiations of
        the model and dataset. Variation over these seeds is used to plot
        error bars.
    hue_key : Optional[str]
        Key into the parameter dictionaries used for the hue grouping.
    style_key : Optional[str]
        Key into the parameter dictionaries used for the style grouping.
    figname : str
        Name of the figure to save.
    """
if subdir is None:
subdir = Path('acc_over_training/')
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_acc_over_training(params, hue_key, style_key, seeds):
num_pnts = 1000
        table_piece = pd.DataFrame()
import keras
from keras.models import Model
from keras.layers import Input,Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import multi_gpu_model
from keras.utils import plot_model
from keras import losses
import os
import tensorflow as tf
from keras import backend as K
import DataGenerator as dg
import get_modelv2_3
import get_model
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
from keras.models import load_model
import re
import seaborn as sns
from sklearn.linear_model import LinearRegression
import scipy
import warnings
import sys
from sklearn.metrics import roc_curve, auc
import time
warnings.filterwarnings('ignore')
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2'
class multi_task_training_respect:
def __init__(self):
self.model = keras.Model()
self.model_class_task= keras.Model()
self.model_reg_task= keras.Model()
self.lr1 = 0.0001
self.lr2 = 0.0001
self.alpha = 0.5
self.patience_class = 6
self.patience_reg = 6
self.font1 = {
'weight': 'normal',
'size': 16,
}
self.font2 = {
'weight': 'normal',
'size': 23,
}
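    # Hyperparameter notes: lr1/lr2 are the Adam learning rates for the
    # regression and classification tasks respectively, alpha is forwarded to
    # get_model.get_multi_model (presumably the multi-task weighting), and
    # patience_class/patience_reg are the early-stopping patiences in epochs
    # without validation-loss improvement. font1/font2 only style the plots.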
def get_batch_data(self,prot, comp, y, batch_count, batch_size, batch_count_per_epoch):
batch_count = batch_count % batch_count_per_epoch
batch_prot = prot[batch_size * batch_count:min(batch_size * (batch_count + 1), len(prot))]
batch_comp = comp[batch_size * batch_count:min(batch_size * (batch_count + 1), len(prot))]
batch_y = y[batch_size * batch_count:min(batch_size * (batch_count + 1), len(prot))]
return batch_prot, batch_comp, batch_y
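    # Illustrative use (values hypothetical): with batch_size=128 and
    # batch_count_per_epoch = len(prot) // 128, a single ever-increasing batch
    # counter can be passed in; the modulo at the top of get_batch_data wraps
    # it so the slicing restarts from the beginning of the arrays each epoch.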
def draw_loss_and_accuracy_curve(self,history_class, history_class_vali, model_name, save_dir):
train_loss = []
vali_loss = []
train_accuracy = []
vali_accuracy = []
for tmp in history_class:
train_loss.append(tmp[0])
train_accuracy.append(tmp[1])
for tmp in history_class_vali:
vali_loss.append(tmp[0])
vali_accuracy.append(tmp[1])
epochs = range(1, len(history_class) + 1)
##---------------draw loss curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_loss, 'b', label='Classification training loss')
plt.plot(epochs, vali_loss, 'r', label='Classification validation loss')
plt.title('Classification Training and Validation Loss', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Loss', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_class_training_validation_loss.png' % model_name)
##---------------draw accuracy curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_accuracy, 'b', label='Classification training accuracy')
plt.plot(epochs, vali_accuracy, 'r', label='Classification validation accuracy')
plt.title('Training and Validation Accuracy', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Accuracy', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_class_training_validation_accuracy.png' % model_name)
def draw_loss_and_mse_curve(self,history_reg, history_reg_vali, model_name, save_dir):
train_loss = []
vali_loss = []
train_mse = []
vali_mse = []
for tmp in history_reg:
train_loss.append(tmp[0])
train_mse.append(tmp[1])
for tmp in history_reg_vali:
vali_loss.append(tmp[0])
vali_mse.append(tmp[1])
epochs = range(1, len(history_reg) + 1)
##---------------draw loss curve------------------##
plt.figure(figsize=(10.3, 10))
plt.plot(epochs, train_loss, 'b', label='Regression training loss')
plt.plot(epochs, vali_loss, 'r', label='Regression validation loss')
plt.title('Regression Training and Validation Loss', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Loss', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_reg_training_validation_loss.png' % model_name)
##---------------draw accuracy curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_mse, 'b', label='Regression training mse')
plt.plot(epochs, vali_mse, 'r', label='Regression validation mse')
plt.title('Regression Training and Validation MSE', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('MSE', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_reg_training_validation_mse.png' % model_name)
def mean_squared_error_l2(self,y_true, y_pred, lmbda=0.01):
cost = K.mean(K.square(y_pred - y_true))
# weights = self.model.get_weights()
weights = []
for layer in self.model_reg_task.layers:
# print(layer)
weights = weights + layer.get_weights()
# print (weights)
result = tf.reduce_sum([tf.reduce_sum(tf.pow(wi, 2)) for wi in weights])
l2 = lmbda * result # K.sum([K.square(wi) for wi in weights])
return cost + l2
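    # Note on this loss: Keras invokes it as loss(y_true, y_pred), so lmbda
    # keeps its default of 0.01 when compiled in train_model. To tune it, wrap
    # it, e.g. loss=lambda yt, yp: self.mean_squared_error_l2(yt, yp, lmbda=1e-3).
    # The penalty sums squared weights over every layer of model_reg_task, so
    # any layers shared with the classification model are presumably penalized
    # as well.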
def train_model(self,class_training_file,class_validation_file,reg_training_file,reg_validation_file,model_name,
reg_batch_size=128,class_batch_size=128,class_epoch = 50,reg_epoch = 100,
pro_branch_switch1 = 'inception_block', pro_branch_switch2 = 'inception_block',
pro_branch_switch3='inception_block_b', pro_add_attention = False,
comp_branch_switch1 = 'inception_block', comp_branch_switch2 = 'inception_block',
comp_branch_switch3 = 'inception_block_b', comp_add_attention = False):#reg_size=256
##2.get_model
save_dir = os.path.join(os.getcwd(), 'models',model_name)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
self.model_class_task, self.model_reg_task = get_model.get_multi_model(save_dir, self.alpha,
pro_branch_switch1=pro_branch_switch1,pro_branch_switch2=pro_branch_switch2,
pro_branch_switch3=pro_branch_switch3,pro_add_attention=pro_add_attention,
comp_branch_switch1=comp_branch_switch1,comp_branch_switch2=comp_branch_switch2,
comp_branch_switch3=comp_branch_switch3,comp_add_attention=comp_add_attention)
optimizer1 = keras.optimizers.Adam(lr=self.lr1)
self.model_reg_task.compile(optimizer=optimizer1,
loss=self.mean_squared_error_l2,#'mean_squared_error'
metrics=['mse','mae'])
optimizer2 = keras.optimizers.Adam(lr=self.lr2)
self.model_class_task.compile(optimizer=optimizer2,loss='binary_crossentropy',metrics=['accuracy'])
##1.read data
print("Starting read reg training data:")
reg_train_generator = dg.read_reg_generator(reg_training_file, reg_batch_size)
reg_vali_prot, reg_vali_comp, reg_vali_value = dg.read_reg(reg_validation_file)
print('regression validation data shape:', len(reg_vali_prot))
class_train_generator = dg.read_class_generator(class_training_file, class_batch_size)
class_vali_prot, class_vali_comp, class_vali_label = dg.read_class(class_validation_file)
print('classification validation data shape:', len(class_vali_prot))
##3.training model
#before train prepare
batch_count_of_class=0
batch_count_per_epoch_class=189109//class_batch_size
batch_count_of_reg = 0
batch_count_per_epoch_reg = 18071 // reg_batch_size
epoch_class = 0
epoch_reg=0
history_class=[]
history_class_vali=[]
history_reg=[]
history_reg_vali=[]
class_erally_stop_flag=1
reg_erally_stop_flag = 1
class_batch_count = class_epoch * batch_count_per_epoch_class
reg_batch_count = reg_epoch * batch_count_per_epoch_reg
        reg_to_class_ratio = reg_batch_count/class_batch_count  # avoid reusing K, which names the Keras backend above
total_batch_count=class_batch_count+reg_batch_count
#start train
reg_min_loss = float('inf')
reg_min_loss_index = 0
class_min_loss=float('inf')
class_min_loss_index=0
best_reg_model = None
best_class_model = None
best_reg_file = save_dir + "/%s_best_reg_model.hdf5" % model_name
best_class_file = save_dir + "/%s_best_class_model.hdf5" % model_name
reg_loss=[]
class_loss=[]
for i in range(total_batch_count):
#regression
            if np.random.rand() * (1 + reg_to_class_ratio) >= 1 and reg_erally_stop_flag and epoch_reg < reg_epoch:
print('batch %d(reg):'%i)
reg_batch_prot, reg_batch_comp, reg_batch_value = next(reg_train_generator)
tmp_loss=self.model_reg_task.train_on_batch([reg_batch_prot, reg_batch_comp], reg_batch_value)
reg_loss.append(tmp_loss)
batch_count_of_reg+=1
if batch_count_of_reg % batch_count_per_epoch_reg==0 and batch_count_of_reg>0:
epoch_reg += 1
print("regression epoch %d:"%epoch_reg)
#train performance:loss, mse, mae
print(' regression training loss=',np.mean(reg_loss,axis=0))
history_reg.append(np.mean(reg_loss,axis=0))
reg_loss=[]
#validation performance
score=self.model_reg_task.evaluate([reg_vali_prot,reg_vali_comp],reg_vali_value)
print(' regression evaluation loss=',score)
history_reg_vali.append(score)
#checkpoint and earlly stop
if epoch_reg-reg_min_loss_index>=self.patience_reg:
reg_erally_stop_flag=0
if score[0]<reg_min_loss:
reg_min_loss_index=epoch_reg
reg_min_loss=score[0]
                        # checkpoint: keep a reference to the current model
                        # (note this is the live object, not a frozen copy;
                        # best_reg_file above is defined but unused)
                        best_reg_model = self.model_reg_task
# classification
else:
if class_erally_stop_flag and epoch_class<class_epoch:
print('batch %d(class):' % i)
class_batch_prot, class_batch_comp, class_batch_label = next(class_train_generator)
tmp_loss=self.model_class_task.train_on_batch([class_batch_prot, class_batch_comp], class_batch_label)
class_loss.append(tmp_loss)
batch_count_of_class += 1
if batch_count_of_class % batch_count_per_epoch_class == 0 and batch_count_of_class>0:
epoch_class += 1
print("classification epoch %d:"%epoch_class)
# train performance:loss, mse, mae
print(' classification training loss=',np.mean(class_loss,axis=0))
history_class.append(np.mean(class_loss,axis=0))
class_loss=[]#
accuracy = self.model_class_task.evaluate([class_vali_prot, class_vali_comp], class_vali_label)
# validation performance
print(' classification evaluation loss=',accuracy)
history_class_vali.append(accuracy)
# checkpoint and earlly stop
if epoch_class - class_min_loss_index >= self.patience_class:
class_erally_stop_flag = 0
if accuracy[0] < class_min_loss:
class_min_loss_index = epoch_class
class_min_loss = accuracy[0]
                        # checkpoint: keep a reference to the current model
                        # (live object, not a frozen copy; best_class_file is
                        # defined but unused)
                        best_class_model = self.model_class_task
##5.save model
#(1).class model
model_path = os.path.join(save_dir,model_name+'_class.h5')
best_class_model.save(model_path)
#(2).reg model
model_path = os.path.join(save_dir,model_name+'_reg.h5')
best_reg_model.save(model_path)
print("save model!")
def save_predict_result(self,predict_result,real_label_or_value,model_name,class_or_reg,type):
if predict_result.shape[1] == 1:
if class_or_reg=='class':
df = predict_result
df.columns = ['predict_label']
else:
df = predict_result
df.columns = ['predict_value']
else:
df = predict_result
df.columns = ['predict_label','predict_value']
if class_or_reg=='class':
df['real_lable'] = real_label_or_value
else:
df['real_value'] = real_label_or_value
df['set']=type
if not os.path.exists('predict_value'):
os.mkdir('predict_value')
df.to_csv('predict_value/multi-task_model_%s_%s_%s_predict_result.csv' % (model_name,class_or_reg,type),
index=False)
print('predict_value/multi-task_model_%s_%s_%s_predict_result.csv has been saved!' % (model_name,class_or_reg,type))
return df
def computer_parameter_draw_scatter_plot(self,predictions, model_name):
sns.set(context='paper', style='white')
sns.set_color_codes()
set_colors = {'train': 'b', 'validation': 'green', 'test': 'purple'}
for set_name, table in predictions.groupby('set'):
rmse = ((table['predict_value'] - table['real_value']) ** 2).mean() ** 0.5
mae = (np.abs(table['predict_value'] - table['real_value'])).mean()
corr = scipy.stats.pearsonr(table['predict_value'], table['real_value'])
lr = LinearRegression()
lr.fit(table[['predict_value']], table['real_value'])
y_ = lr.predict(table[['predict_value']])
sd = (((table["real_value"] - y_) ** 2).sum() / (len(table) - 1)) ** 0.5
print("%10s set: RMSE=%.3f, MAE=%.3f, R=%.2f (p=%.2e), SD=%.3f" %
(set_name, rmse, mae, *corr, sd))
grid = sns.jointplot('real_value', 'predict_value', data=table, stat_func=None, color=set_colors[set_name],
space=0, size=4, ratio=4, s=20, edgecolor='w', ylim=(0, 16), xlim=(0, 16)) # (0.16)
grid.set_axis_labels('real', 'predicted')#, fontsize=16
grid.ax_joint.set_xticks(range(0, 16, 5))
grid.ax_joint.set_yticks(range(0, 16, 5))
a = {'train': 'training', 'validation': 'validation', 'test': 'test'}
set_name=a[set_name]
            grid.ax_joint.text(1, 14, set_name + ' set', fontsize=14)  # adjust the title font size
grid.ax_joint.text(16, 19.5, 'RMSE: %.3f' % (rmse), fontsize=9)
grid.ax_joint.text(16, 18.5, 'MAE: %.3f ' % mae, fontsize=9)
grid.ax_joint.text(16, 17.5, 'R: %.2f ' % corr[0], fontsize=9)
grid.ax_joint.text(16, 16.5, 'SD: %.3f ' % sd, fontsize=9)
grid.fig.savefig('%s_%s_scatter_plot.jpg' %(model_name,set_name), dpi=400)
def draw_ROC_curve(self,predictions, model_name):
set_colors = {'train': 'b', 'validation': 'green', 'test': 'purple','independent test':'r'}
for set_name, table in predictions.groupby('set'):
fpr, tpr, threshold = roc_curve(table['real_lable'],table['predict_label'])
roc_auc = auc(fpr, tpr)
plt.figure(figsize=(10, 10))
lw = 2
plt.plot(fpr, tpr, color=set_colors[set_name],
lw=lw, label='ROC curve (auc = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'b--', lw=lw,
label='Random guess (auc = 0.5)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.tick_params(labelsize=20)
plt.xlabel('False Positive Rate', self.font2)
plt.ylabel('True Positive Rate', self.font2)
# plt.title('ROC curv')
plt.legend(loc="lower right", prop=self.font1)
plt.savefig("%s_%s_ROC_curve.png" %(model_name,set_name))
def test_model(self,model_file,class_test_file,class_train_file,class_vali_file,
reg_test_file,reg_train_file,reg_vali_file):
##read data
print('starting read data!')
#1.train data
class_train_prot, class_train_comp, class_train_label=dg.multi_process_read_pro_com_file(class_train_file)
reg_train_prot, reg_train_comp,_, reg_train_value=dg.multi_process_read_pro_com_file_regression(reg_train_file)
#2.validation data
class_vali_prot, class_vali_comp, class_vali_label = dg.multi_process_read_pro_com_file(class_vali_file)
reg_vali_prot, reg_vali_comp, _,reg_vali_value = dg.multi_process_read_pro_com_file_regression(reg_vali_file)
#3.test data
class_test_prot, class_test_comp, class_test_label = dg.multi_process_read_pro_com_file(class_test_file)
reg_test_prot, reg_test_comp,_, reg_test_value = dg.multi_process_read_pro_com_file_regression(reg_test_file)
print('classification data size:', len(class_train_prot), len(class_vali_prot), len(class_test_prot))
print('regression data size:', len(reg_train_prot),len(reg_vali_prot),len(reg_test_prot))
##load_model
print('loading modle!')
model = load_model(model_file)
tmp = model_file.split('/')[-1]
model_name = re.findall(r"(.+?).h5", tmp)[0]
## saving predict value
#predict value
#1.train
class_train_predict_value = model.predict([class_train_prot, class_train_comp])
class_train_predict_value_df=pd.DataFrame(class_train_predict_value[0],columns=['label'])
class_train_predict_value_df['value']=class_train_predict_value[1]
reg_train_predict_value = model.predict([reg_train_prot, reg_train_comp])
reg_train_predict_value_df=pd.DataFrame(reg_train_predict_value[0],columns=['label'])
reg_train_predict_value_df['value']=reg_train_predict_value[1]
#2.vali
class_vali_predict_value = model.predict([class_vali_prot, class_vali_comp])
class_vali_predict_value_df = pd.DataFrame(class_vali_predict_value[0])
class_vali_predict_value_df['value']=class_vali_predict_value[1]
reg_vali_predict_value = model.predict([reg_vali_prot, reg_vali_comp])
        reg_vali_predict_value_df = pd.DataFrame(reg_vali_predict_value[0])
from pandas import Series, Period, PeriodIndex, date_range
class create_period_index_from_date_range(object):
goal_time = 0.2
def time_period_index(self):
# Simulate irregular PeriodIndex
PeriodIndex(date_range('1985', periods=1000).to_pydatetime(), freq='D')
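# airspeed velocity (asv) style benchmark classes: `goal_time` is roughly the
# time budget asv aims for when repeatedly timing each `time_*` method, and
# `setup` runs before the timed calls.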
class period_setitem(object):
goal_time = 0.2
def setup(self):
self.N = 100000
        self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
import os
import uuid
from datetime import datetime
from time import sleep
import fsspec
import pandas as pd
import pytest
import v3iofs
from storey import EmitEveryEvent
import mlrun
import mlrun.feature_store as fs
from mlrun import store_manager
from mlrun.datastore.sources import CSVSource, ParquetSource
from mlrun.datastore.targets import CSVTarget, NoSqlTarget, ParquetTarget
from mlrun.features import Entity
from tests.system.base import TestMLRunSystem
@TestMLRunSystem.skip_test_if_env_not_configured
# Marked as enterprise because of v3io mount and remote spark
@pytest.mark.enterprise
class TestFeatureStoreSparkEngine(TestMLRunSystem):
project_name = "fs-system-spark-engine"
spark_service = ""
pq_source = "testdata.parquet"
csv_source = "testdata.csv"
spark_image_deployed = (
False # Set to True if you want to avoid the image building phase
)
test_branch = "" # For testing specific branch. e.g.: "https://github.com/mlrun/mlrun.git@development"
@classmethod
def _init_env_from_file(cls):
env = cls._get_env_from_file()
cls.spark_service = env["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"]
def get_local_pq_source_path(self):
return os.path.relpath(str(self.assets_path / self.pq_source))
def get_remote_pq_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.pq_source
return path
def get_local_csv_source_path(self):
return os.path.relpath(str(self.assets_path / self.csv_source))
def get_remote_csv_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.csv_source
return path
def custom_setup(self):
from mlrun import get_run_db
from mlrun.run import new_function
from mlrun.runtimes import RemoteSparkRuntime
self._init_env_from_file()
if not self.spark_image_deployed:
store, _ = store_manager.get_or_create_store(
self.get_remote_pq_source_path()
)
store.upload(
self.get_remote_pq_source_path(without_prefix=True),
self.get_local_pq_source_path(),
)
store, _ = store_manager.get_or_create_store(
self.get_remote_csv_source_path()
)
store.upload(
self.get_remote_csv_source_path(without_prefix=True),
self.get_local_csv_source_path(),
)
if not self.test_branch:
RemoteSparkRuntime.deploy_default_image()
else:
sj = new_function(
kind="remote-spark", name="remote-spark-default-image-deploy-temp"
)
sj.spec.build.image = RemoteSparkRuntime.default_image
sj.with_spark_service(spark_service="dummy-spark")
sj.spec.build.commands = ["pip install git+" + self.test_branch]
sj.deploy(with_mlrun=False)
get_run_db().delete_function(name=sj.metadata.name)
self.spark_image_deployed = True
def test_basic_remote_spark_ingest(self):
key = "patient_id"
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
assert measurements.status.targets[0].run_id is not None
def test_basic_remote_spark_ingest_csv(self):
key = "patient_id"
name = "measurements"
measurements = fs.FeatureSet(
name,
entities=[fs.Entity(key)],
engine="spark",
)
source = CSVSource(
"mycsv", path=self.get_remote_csv_source_path(), time_field="timestamp"
)
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
features = [f"{name}.*"]
vec = fs.FeatureVector("test-vec", features)
resp = fs.get_offline_features(vec)
df = resp.to_dataframe()
assert type(df["timestamp"][0]).__name__ == "Timestamp"
def test_error_flow(self):
df = pd.DataFrame(
{
"name": ["Jean", "Jacques", "Pierre"],
"last_name": ["Dubois", "Dupont", "Lavigne"],
}
)
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity("name")],
engine="spark",
)
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
fs.ingest(
measurements,
df,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
def test_ingest_to_csv(self):
key = "patient_id"
csv_path_spark = "v3io:///bigdata/test_ingest_to_csv_spark"
csv_path_storey = "v3io:///bigdata/test_ingest_to_csv_storey.csv"
measurements = fs.FeatureSet(
"measurements_spark",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_spark)]
fs.ingest(
measurements,
source,
targets,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
csv_path_spark = measurements.get_target_path(name="csv")
measurements = fs.FeatureSet(
"measurements_storey",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_storey)]
fs.ingest(
measurements,
source,
targets,
)
csv_path_storey = measurements.get_target_path(name="csv")
read_back_df_spark = None
file_system = fsspec.filesystem("v3io")
for file_entry in file_system.ls(csv_path_spark):
filepath = file_entry["name"]
if not filepath.endswith("/_SUCCESS"):
read_back_df_spark = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_spark is not None
read_back_df_storey = None
for file_entry in file_system.ls(csv_path_storey):
filepath = file_entry["name"]
read_back_df_storey = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_storey is not None
assert read_back_df_spark.sort_index(axis=1).equals(
read_back_df_storey.sort_index(axis=1)
)
@pytest.mark.parametrize("partitioned", [True, False])
def test_schedule_on_filtered_by_time(self, partitioned):
name = f"sched-time-{str(partitioned)}"
now = datetime.now()
path = "v3io:///bigdata/bla.parquet"
fsys = fsspec.filesystem(v3iofs.fs.V3ioFS.protocol)
pd.DataFrame(
{
"time": [
pd.Timestamp("2021-01-10 10:00:00"),
pd.Timestamp("2021-01-10 11:00:00"),
],
"first_name": ["moshe", "yosi"],
"data": [2000, 10],
}
).to_parquet(path=path, filesystem=fsys)
cron_trigger = "*/3 * * * *"
source = ParquetSource(
"myparquet", path=path, time_field="time", schedule=cron_trigger
)
feature_set = fs.FeatureSet(
name=name,
entities=[fs.Entity("first_name")],
timestamp_key="time",
engine="spark",
)
if partitioned:
targets = [
NoSqlTarget(),
ParquetTarget(
name="tar1",
path="v3io:///bigdata/fs1/",
partitioned=True,
partition_cols=["time"],
),
]
else:
targets = [
ParquetTarget(
name="tar2", path="v3io:///bigdata/fs2/", partitioned=False
),
NoSqlTarget(),
]
fs.ingest(
feature_set,
source,
run_config=fs.RunConfig(local=False),
targets=targets,
spark_context=self.spark_service,
)
# ingest starts every third minute and it can take ~150 seconds to finish.
time_till_next_run = 180 - now.second - 60 * (now.minute % 3)
sleep(time_till_next_run + 150)
features = [f"{name}.*"]
vec = fs.FeatureVector("sched_test-vec", features)
with fs.get_online_feature_service(vec) as svc:
resp = svc.get([{"first_name": "yosi"}, {"first_name": "moshe"}])
assert resp[0]["data"] == 10
assert resp[1]["data"] == 2000
pd.DataFrame(
{
"time": [
pd.Timestamp("2021-01-10 12:00:00"),
pd.Timestamp("2021-01-10 13:00:00"),
now + pd.Timedelta(minutes=10),
pd.Timestamp("2021-01-09 13:00:00"),
],
"first_name": ["moshe", "dina", "katya", "uri"],
"data": [50, 10, 25, 30],
}
).to_parquet(path=path)
sleep(180)
resp = svc.get(
[
{"first_name": "yosi"},
{"first_name": "moshe"},
{"first_name": "katya"},
{"first_name": "dina"},
{"first_name": "uri"},
]
)
assert resp[0]["data"] == 10
assert resp[1]["data"] == 50
assert resp[2] is None
assert resp[3]["data"] == 10
assert resp[4] is None
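            # katya's row is time-stamped in the future and uri's row predates
            # the window already ingested, so the scheduled, time-filtered
            # ingest is expected to skip both (hence the None results above).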
# check offline
resp = fs.get_offline_features(vec)
        assert len(resp.to_dataframe()) == 4
assert "uri" not in resp.to_dataframe() and "katya" not in resp.to_dataframe()
def test_aggregations(self):
name = f"measurements_{uuid.uuid4()}"
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
df = pd.DataFrame(
{
"time": [
test_base_time,
test_base_time + pd.Timedelta(minutes=1),
test_base_time + pd.Timedelta(minutes=2),
test_base_time + pd.Timedelta(minutes=3),
                    test_base_time + pd.Timedelta(minutes=4)
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
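# These small factories mint DataSet subclasses on the fly so a test can ask for
# estimates N announcements out (e.g. QuartersEstimates(2).num_announcements is
# 2), while QuartersEstimatesNoNumQuartersAttr deliberately omits the attribute
# to exercise the loaders' validation errors.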
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
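# Illustrative call (the values below are hypothetical): two sids whose
# estimates become known on different dates, forward-filled up to the compute
# date:
#   create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp('2015-01-05'),
#       sids=[0, 1],
#       tuples=[(0, 100., pd.Timestamp('2015-01-05')),
#               (1, 200., pd.Timestamp('2015-01-07'))],
#       end_date=pd.Timestamp('2015-01-09'),
#   )
# The result is indexed by (at_date, knowledge_date) with one column per sid.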
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that a single day of data for a dataset with multiple estimate
        columns loads with the expected values.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader loads a single day of multi-column
    estimates data and produces the expected output.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader loads a single day of multi-column
    estimates data and produces the expected output.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
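    # Illustrative note (added): permutations of the 8 knowledge dates taken 4
    # at a time give 8 * 7 * 6 * 5 = 1680 candidates; the constraints above
    # (q1e1 < q1e2, q2e1 < q2e2, and both Q1 estimates before Q1's first
    # release date) keep only the consistent interleavings, assigning one sid
    # per surviving permutation.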
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
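# Worked example for the window arithmetic above (added; dates come from the
# fixture, the session count assumes no market holidays that week): with
# window_test_start_date = 2015-01-05 and start_date = 2015-01-09, window_len
# spans the sessions 2015-01-05 .. 2015-01-09 inclusive, i.e. 5 trading days.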
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, | pd.Timestamp('2015-01-12') | pandas.Timestamp |
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
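        # Note (added): mergesort is stable, so rows with equal sort keys keep
        # their original relative order; the frame is already in descending key
        # order, which is why the sorted result equals the input unchanged.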
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
    def test_sort_values_stable_categorical(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
        # cause was that the int64 value NaT was considered as "na", which is
        # only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
        # previous behavior incorrectly retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import os
import sys
import subprocess
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import scipy.cluster.hierarchy
from matplotlib import cm
from decneo.commonFunctions import read, write
import multiprocessing
cwd = '/mnt/gs18/scratch/users/paterno1/otherCellTypes_choroid/LRpairs/'
celltypes = ['Endothelial', 'Pericyte', 'Fibroblast', 'Macrophage', 'SMC']
genetypes = ['ligands', 'receptors']
def loadRamiowskiLRPairs(dir):
df = pd.read_excel(dir + 'SupplementaryData2Ramilowski.xlsx', sheet_name='All.Pairs', index_col=False)
df = df[['Ligand.ApprovedSymbol', 'Receptor.ApprovedSymbol', 'Pair.Evidence']]
df = df.loc[df['Pair.Evidence'] != 'EXCLUDED']
df = df.loc[df['Pair.Evidence'] != 'EXCLUDED not receptor']
df = df.loc[df['Pair.Evidence'] != 'EXCLUDED not ligand']
df = df.drop(columns=['Pair.Evidence'])
df.columns = ['ligand', 'receptor']
df = df.reset_index(drop=True)
return pd.MultiIndex.from_arrays([df['ligand'].values, df['receptor'].values], names=['ligand', 'receptor'])
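# Hedged usage sketch for the loader above (the file name and sheet come from
# the function itself; the symbols below are placeholders, not asserted pairs):
#
#     pairs = loadRamiowskiLRPairs(cwd)             # MultiIndex (ligand, receptor)
#     ('SOME_LIGAND', 'SOME_RECEPTOR') in pairs     # membership test
#     pairs.get_level_values('ligand').unique()     # all ligand symbols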
def doLR(geneL, geneR, cutoff, hcutoff, dataDict, LR = None, suffix = 'data', makePlot = True, saveData = True):
name = '%s-%s' % (geneL, geneR)
grays = plt.cm.gray_r
resDict = dict()
for genetype in genetypes:
resDict.update({genetype: dict()})
for celltype in celltypes:
df_peaks = dataDict[hcutoff][genetype][celltype]
gene = geneL if genetype=='ligands' else geneR
se_peak = df_peaks[gene] if gene in df_peaks.columns else pd.Series(dtype=float)
resDict[genetype].update({celltype: se_peak[se_peak>=cutoff]})
if makePlot:
fig, ax = plt.subplots(figsize=[7,7])
cx = np.array([0.,0.25,0.5,0.75,1.])*1.35
gy = [0.2, 0.8]
colorg = ['blue', 'green']
sh = [-1, 1]
de = 0.035
celltypesO = ['SMC', 'Pericyte', 'Endothelial', 'Fibroblast', 'Macrophage']
if makePlot:
ax.text(0, 0.9, name, ha='center', va='center', fontsize=20)
gg = []
for ig1, genetype1 in enumerate(genetypes):
if makePlot:
ax.text(0.5*1.35, gy[ig1]+0.05*sh[ig1], genetype1, ha='center', va='center', fontsize=20)
for ic1, celltype1 in enumerate(celltypesO):
if makePlot:
ax.text(cx[ic1], gy[ig1]+0.00*sh[ig1], celltype1, ha='center', va='center', fontsize=15)
t1 = resDict[genetype1][celltype1]
group1 = 0
h1 = 1.
g1 = t1
temp1 = cx[ic1] + (-1/2 + group1 + 0.5)*de, gy[ig1]-0.05*sh[ig1]
if makePlot:
mec = 'k'
ax.plot(*temp1, 'o', ms=0.9*len(g1)/2, color=colorg[ig1], mec=mec, mew=1.0)
ggc = []
ig2, genetype2 = 1, 'receptors'
if genetype2!=genetype1:
for ic2, celltype2 in enumerate(celltypesO):
t2 = resDict[genetype2][celltype2]
group2 = 0
temp2 = cx[ic2] + (1/2-group2-0.5)*de, gy[ig2]-0.05*sh[ig2]
c = | pd.MultiIndex.from_tuples([(a, b) for a in g1.index for b in t2.index], names=['ligand', 'receptor']) | pandas.MultiIndex.from_tuples |
import pandas as pd
from django.utils.text import slugify
from hashlib import sha256
from django.utils.crypto import get_random_string
from query.models import Query
from WikiContrib.settings import API_TOKEN, GITHUB_ACCESS_TOKEN
ORGS = [
"wikimedia",
"wmde",
"DataValues",
"commons-app",
"wikidata",
"openzim",
"mediawiki-utilities",
"wiki-ai",
"wikimedia-research",
"toollabs",
"toolforge",
"counterVandalism"
]
API_ENDPOINTS = [
["""https://phabricator.wikimedia.org/api/maniphest.search""",
"""https://phabricator.wikimedia.org/api/user.search"""],
["""https://gerrit.wikimedia.org/r/changes/?q=owner:{gerrit_username}&o=DETAILED_ACCOUNTS""",
"""https://gerrit.wikimedia.org/r/accounts/?q=name:{gerrit_username}&o=DETAILS"""],
["""https://api.github.com/search/commits?per_page=100&q=author:{github_username}""",
"""https://api.github.com/search/issues?per_page=100&q=is:pr+is:merged+author:{github_username}"""]
]
REQUEST_DATA = [
{
'constraints[authorPHIDs][0]': '',
'api.token': API_TOKEN,
'constraints[createdStart]': 0,
'constraints[createdEnd]': 0
},
{
'constraints[assigned][0]': '',
'api.token': API_TOKEN,
'constraints[createdStart]': 0,
'constraints[createdEnd]': 0
},
{
'constraints[usernames][0]':'',
'api.token': API_TOKEN
},
{
'github_username':'',
'github_access_token':GITHUB_ACCESS_TOKEN,
'createdStart':0,
'createdEnd':0
}
]
def get_prev_user(file, ind):
prev_user = None
while True:
if ind != 0:
temp = file.iloc[ind - 1, :]
if pd.isnull(temp['fullname']) or (pd.isnull(temp['Gerrit']) and pd.isnull(temp['Phabricator'])):
ind -= 1
else:
prev_user = temp['fullname']
break
else:
break
return prev_user
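# Illustrative note (added; not in the original source): get_prev_user walks upward from
# row `ind`, skipping rows whose 'fullname' is missing or that have neither a 'Gerrit'
# nor a 'Phabricator' handle, and returns the nearest valid 'fullname' above (or None).
# get_next_user below mirrors the same scan downward.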
def get_next_user(file, ind):
next_user = None
while True:
if ind != len(file) - 1:
temp = file.iloc[ind+1, :]
if pd.isnull(temp['fullname']) or (pd.isnull(temp['Gerrit']) and pd.isnull(temp['Phabricator'])):  # api: pandas.isnull
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False, False], list("abcd"))
result = a | b
tm.assert_series_equal(result, expected)
# filling
# vs empty
empty = Series([], dtype=object)
result = a & empty.copy()
expected = Series([False, False, False], list("bca"))
tm.assert_series_equal(result, expected)
result = a | empty.copy()
expected = Series([True, False, True], list("bca"))
tm.assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ["z"])
expected = Series([False, False, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
result = a | Series([1], ["z"])
expected = Series([True, True, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [
empty.copy(),
Series([1], ["z"]),
Series(np.nan, b.index),
Series(np.nan, a.index),
]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
for e in [Series(["z"])]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
# vs scalars
index = list("bca")
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
tm.assert_series_equal(result, expected)
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
for v in [np.nan, "foo"]:
with pytest.raises(TypeError, match=msg):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
tm.assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
tm.assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
tm.assert_series_equal(result, expected)
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
for v in [np.nan]:
with pytest.raises(TypeError, match=msg):
t & v
def test_logical_ops_df_compat(self):
# GH#1134
s1 = Series([True, False, True], index=list("ABC"), name="x")
s2 = Series([True, True, False], index=list("ABD"), name="x")
exp = Series([True, False, False, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 | s2, exp_or1)
# np.nan | True => np.nan, filled with False
exp_or = Series([True, True, False, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s2 | s1, exp_or)  # api: pandas._testing.assert_series_equal
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 10:08:44 2020
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
cwd1 = '../../../../test/static/0_WingValidationPytornadoFramAT_case0/CFD/_results/'
cwd2 = '../../../../test/static/0_WingValidationPytornadoFramAT_case0/'
# Unloaded beam
filename_1 = '5_NoSL_NoAL_FEM_displacementAndRotations0.csv'
filename_2 = '10_NoSL_NoAL_FEM_displacementAndRotations0.csv'
filename_3 = '20_NoSL_NoAL_FEM_displacementAndRotations0.csv'
filename_4 = '40_NoSL_NoAL_FEM_displacementAndRotations0.csv'
filename_5 = '80_NoSL_NoAL_FEM_displacementAndRotations0.csv'
filename_6 = '160_NoSL_NoAL_FEM_displacementAndRotations0.csv'
filename_7 = '320_NoSL_NoAL_FEM_displacementAndRotations0.csv'
# Weighted beam with no aerodynamic forces
filename_8 = '5_SL_NoAL_FEM_displacementAndRotations0.csv'
filename_9 = '10_SL_NoAL_FEM_displacementAndRotations0.csv'
filename_10 = '20_SL_NoAL_FEM_displacementAndRotations0.csv'
filename_11 = '40_SL_NoAL_FEM_displacementAndRotations0.csv'
filename_12 = '80_SL_NoAL_FEM_displacementAndRotations0.csv'
filename_13 = '160_SL_NoAL_FEM_displacementAndRotations0.csv'
filename_14 = '320_SL_NoAL_FEM_displacementAndRotations0.csv'
# Influence of aerodynamic mesh
filename_15 = '10_NoSL_05x20AL_results.csv'
filename_16 = '10_NoSL_05x40AL_results.csv'
filename_17 = '10_NoSL_10x40AL_results.csv'
filename_18 = '10_NoSL_10x80AL_results.csv'
# Influence of aerodynamic mesh
filename_19 = '20_NoSL_05x20AL_results.csv'
filename_20 = '20_NoSL_05x40AL_results.csv'
filename_21 = '20_NoSL_10x40AL_results.csv'
filename_22 = '20_NoSL_10x80AL_results.csv'
# Influence of aerodynamic mesh
filename_23 = '40_NoSL_05x20AL_results.csv'
filename_24 = '40_NoSL_05x40AL_results.csv'
filename_25 = '40_NoSL_10x40AL_results.csv'
filename_26 = '40_NoSL_10x80AL_results.csv'
# Influence of aerodynamic mesh
filename_27 = '80_NoSL_05x20AL_results.csv'
filename_28 = '80_NoSL_05x40AL_results.csv'
filename_29 = '80_NoSL_10x40AL_results.csv'
filename_30 = '80_NoSL_10x80AL_results.csv'
# Beam mesh convergence
# filename_26 = 's005_results.csv'
# filename_27 = 's010_results.csv'
# filename_28 = 's020_results.csv'
# filename_29 = 's040_results.csv'
# filename_30 = 's080_results.csv'
# filename_31 = 's160_results.csv'
# filename_32 = 's320_results.csv'
# No loads test
filenames_NoSL_NoAL = [filename_1, filename_2, filename_3, filename_4, filename_5, filename_6]
dataFrames_NoSL_NoAL = []
for filename in filenames_NoSL_NoAL:
df = pd.read_csv(cwd1 + filename)
dataFrames_NoSL_NoAL.append(df)
# Constant load test
filenames_SL_NoAL = [filename_8, filename_9, filename_10, filename_11, filename_12, filename_13]
dataFrames_SL_NoAL = []
for filename in filenames_SL_NoAL:
df = pd.read_csv(cwd1 + filename)
dataFrames_SL_NoAL.append(df)
# Influence of aerodynamic mesh
filenames_10_NoSL_AL = [filename_15, filename_16, filename_17, filename_18]
dataFrames_10NoSL_AL = []
for filename in filenames_10_NoSL_AL:
df1 = pd.read_csv(cwd2 + filename,sep=';')
df2 = df1.iloc[0:1,:]
df2['Relative error'] = 0
df = pd.concat([df2,df1])
dataFrames_10NoSL_AL.append(df)
# Influence of aerodynamic mesh
filenames_20_NoSL_AL = [filename_19, filename_20, filename_21, filename_22]
dataFrames_20NoSL_AL = []
for filename in filenames_20_NoSL_AL:
df1 = pd.read_csv(cwd2 + filename,sep=';')
df2 = df1.iloc[0:1,:]
df2['Relative error'] = 0
df = pd.concat([df2,df1])
dataFrames_20NoSL_AL.append(df)
# Influence of aerodynamic mesh
filenames_40NoSL_AL = [filename_23, filename_24, filename_25, filename_26]
dataFrames_40NoSL_AL = []
for filename in filenames_40NoSL_AL:
df1 = pd.read_csv(cwd2 + filename,sep=';')
df2 = df1.iloc[0:1,:]
df2['Relative error'] = 0
df = pd.concat([df2,df1])
dataFrames_40NoSL_AL.append(df)
# Influence of aerodynamic mesh
filenames_80NoSL_AL = [filename_27, filename_28, filename_29, filename_30]
dataFrames_80NoSL_AL = []
for filename in filenames_80NoSL_AL:
df1 = pd.read_csv(cwd2 + filename,sep=';')
df2 = df1.iloc[0:1,:]
df2['Relative error'] = 0
df = pd.concat([df2,df1])
dataFrames_80NoSL_AL.append(df)
# Influence of structure mesh
filenames_NoSL_10x80AL = [filename_18, filename_22, filename_26, filename_30]
dataFrames_NoSL_10x80AL = []
for filename in filenames_NoSL_10x80AL:
df1 = pd.read_csv(cwd2 + filename,sep=';')
df2 = df1.iloc[0:1,:]
df2['Relative error'] = 0
df = pd.concat([df2, df1])  # api: pandas.concat
import pathlib
from collections import defaultdict
from pysam import TabixFile
import pandas as pd
from ...utilities import parse_mc_pattern
def parse_trim_fastq_stats(stat_path):
# example trim fastq stats
"""
status in_reads in_bp too_short too_long too_many_n out_reads w/adapters qualtrim_bp out_bp
0 OK 1490 213724 0 0 0 1490 4 0 213712
1 status in_reads in_bp too_short too_long too_many_n out_reads w/adapters qualtrim_bp out_bp
2 OK 1490 213712 0 0 0 1482 0 1300 182546
"""
*cell_id, read_type = pathlib.Path(stat_path).name.split('.')[0].split('-')
cell_id = '-'.join(cell_id)
trim_stats = pd.read_csv(stat_path, sep='\t')  # api: pandas.read_csv
# --------------------------------------------------------------------------------------
# Copyright (C) 2020–2021 by <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
# --------------------------------------------------------------------------------------
"""Subcommand to plot data."""
import logging.config
import time
from pathlib import Path
from typing import Tuple
import click
import numpy as np
import pandas as pd
from .. import create_logging_dict
from .. import PathLike
from ..libs.figure import Figure
@click.command("cluster", short_help="Plot data from QAA.")
@click.option(
"-i",
"--infile",
metavar="FILE",
default=Path.cwd().joinpath("input.csv"),
show_default=True,
type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True),
help="Data file for analysis",
)
@click.option(
"--label",
metavar="FILE",
default=Path.cwd().joinpath("labels.npy"),
show_default=True,
type=click.Path(exists=False, file_okay=True, dir_okay=False, resolve_path=True),
help="Cluster labels",
)
@click.option(
"-c",
"--centroid",
metavar="FILE",
default=Path.cwd().joinpath("centroids.csv"),
show_default=True,
type=click.Path(exists=False, file_okay=True, dir_okay=False, resolve_path=True),
help="Cluster labels",
)
@click.option(
"-o",
"--outfile",
metavar="FILE",
default=Path.cwd().joinpath("cluster.png"),
show_default=True,
type=click.Path(exists=False, file_okay=True, dir_okay=False, resolve_path=True),
help="Image file",
)
@click.option(
"-l",
"--logfile",
metavar="LOG",
show_default=True,
default=Path.cwd() / "plot.log",
type=click.Path(exists=False, file_okay=True, resolve_path=True),
help="Log file",
)
@click.option(
"--axes",
metavar="AXES",
nargs=3,
default=(0, 1, 2),
type=click.IntRange(min=0, clamp=True),
help="Components to plot",
)
@click.option("--ica / --pca", "method", default=True, help="Type of data")
@click.option(
"--dpi",
metavar="DPI",
default=600,
show_default=True,
type=click.IntRange(min=100, clamp=True),
help="Resolution of the figure",
)
@click.option(
"--azim",
"azimuth",
metavar="AZIMUTH",
default=120,
show_default=True,
type=click.IntRange(min=0, max=359, clamp=True),
help="Azimuth rotation of 3D plot",
)
@click.option(
"--elev",
"elevation",
metavar="ELEVATION",
default=30,
show_default=True,
type=click.IntRange(min=0, max=90, clamp=True),
help="Elevation of 3D plot",
)
@click.option("--cluster", is_flag=True, help="Cluster analysis")
@click.option("-v", "--verbose", is_flag=True, help="Noisy output")
def cli(
infile: PathLike,
label: PathLike,
centroid: PathLike,
outfile: PathLike,
logfile: PathLike,
axes: Tuple[int, int, int],
method: bool,
dpi: int,
azimuth: int,
elevation: int,
cluster: bool,
verbose: bool,
) -> None:
"""Visualize the data."""
start_time: float = time.perf_counter()
in_file = Path(infile)
# Setup logging
logging.config.dictConfig(create_logging_dict(logfile))
logger: logging.Logger = logging.getLogger(__name__)
data_method = "ica" if method else "pca"
sorted_axes = np.sort(axes)
features = [f"{data_method[:2].upper()}{_+1:d}" for _ in sorted_axes]
# Load data
logger.info("Loading %s", in_file)
index = "Frame"
data = read_file(in_file, index=index)
if data.empty:
raise SystemExit(f"Unable to read {in_file}")
data.columns = (
[f"{data_method[:2].upper()}{_+1:d}" for _ in range(data.columns.size)]
if np.issubdtype(data.columns, int)
else data.columns
)
try:
data = pd.concat([data["Cluster"], data[features].reset_index()], axis=1)
except KeyError:
data = data[features].reset_index()
# Load labels, if exists
centroid_data: pd.DataFrame = pd.DataFrame()
if cluster:
label_data = read_file(Path(label))
if "Cluster" not in data.columns and not label_data.empty:
label_data.columns = ["Cluster"]
data = pd.concat([label_data, data], axis=1)
# Load centroid data, if exists
centroid_data = read_file(Path(centroid), index="Cluster")
if not centroid_data.empty:
centroid_data = centroid_data.set_index("Cluster")
centroid_data.columns = features
centroid_data = centroid_data.reset_index()
else:
n_samples, _ = data.shape
label_data = pd.Series(np.zeros(n_samples, dtype=int), name="Cluster")
if "Cluster" not in data.columns:
data = pd.concat([label_data, data], axis=1)  # api: pandas.concat
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or DatetimeTZDtype
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
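# Illustrative sketch (added; not in the original source), assuming pytz is available:
#   tz_to_dtype(None)      -> np.dtype("datetime64[ns]")
#   tz_to_dtype(pytz.UTC)  -> DatetimeTZDtype(tz=pytz.UTC)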
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)  # api: pandas._libs.arrays.NDArrayBacked.__init__
"""Test utility functions.
All tests should not only assert that modified model specifications are correct but
also that there are no side effects on the inputs.
"""
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import yaml
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from skillmodels.process_model import process_model
from skillmodels.utilities import _get_params_index_from_model_dict
from skillmodels.utilities import _remove_from_dict
from skillmodels.utilities import _remove_from_list
from skillmodels.utilities import _shorten_if_necessary
from skillmodels.utilities import extract_factors
from skillmodels.utilities import reduce_n_periods
from skillmodels.utilities import remove_controls
from skillmodels.utilities import remove_factors
from skillmodels.utilities import remove_measurements
from skillmodels.utilities import switch_linear_to_translog
from skillmodels.utilities import switch_translog_to_linear
from skillmodels.utilities import update_parameter_values
# importing the TEST_DIR from config does not work for test run in conda build
TEST_DIR = Path(__file__).parent.resolve()
@pytest.fixture
def model2():
with open(TEST_DIR / "model2.yaml") as y:
model_dict = yaml.load(y, Loader=yaml.FullLoader)
return model_dict
@pytest.mark.parametrize("factors", ["fac2", ["fac2"]])
def test_extract_factors_single(model2, factors):
reduced = extract_factors(factors, model2)
assert list(reduced["factors"]) == ["fac2"]
assert list(model2["factors"]) == ["fac1", "fac2", "fac3"]
assert "anchoring" not in reduced
assert model2["anchoring"]["outcomes"] == {"fac1": "Q1"}
process_model(reduced)
def test_update_parameter_values():
params = pd.DataFrame()
params["value"] = np.arange(5)
others = [
pd.DataFrame([[7], [8]], columns=["value"], index=[1, 4]),
pd.DataFrame([[9]], columns=["value"], index=[2]),
]
expected = pd.DataFrame()  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]  # api: pandas.Timestamp
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
from typing import List, Dict
from random import choice
from collections import namedtuple
from copy import deepcopy
import numpy as np
import pandas as pd
# ------------------------------------------------------------
class Simulator(object):
"""
Base class for all simulators. Step method is a dummy one.
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical
internal values of the simulator.
"""
def __init__(self, state_set, action_set, modules):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
"""
self.state_set = state_set # type: list
self.action_set = action_set # type: list
self.modules = modules # type: dict
self.s_ix = 0 # type: int
self.internals = dict() # type: dict
def reset(self):
"""
Resets the simulator setting the state index to 0.
"""
self.s_ix = 0  # default init state
def _next_state(self):
"""
Moves simulator to the next state.
"""
self.s_ix += 1
if self.s_ix >= len(self.state_set):
self.s_ix = 0
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action) and info dict.
:rtype: tuple
"""
assert(a in self.action_set)
N_A = 10
N_c = 5
cpc = 1.0
rpc = 3.0
r = (rpc - cpc) * N_c
# History keeping internally
self.internals.update({
"N_A": N_A,
"N_c": N_c,
"cpc": cpc,
"rpc": rpc})
self._next_state()
info = {}
return r, info
def state(self):
"""
Returns a copy of the current state.
:return: A copy of the current state.
:rtype: State
"""
return deepcopy(self.state_set[self.s_ix])
def get_history(self):
"""
Returns a copy of the history stored in the simulator.
:return: A copy of the history stored in the simulator.
:rtype: dict
"""
return deepcopy(self.internals)
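# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The `Action` namedtuple and the dummy state/action values below are assumptions made
# purely to illustrate the reset/step/get_history API of the base Simulator.
def _example_simulator_usage():
    Action = namedtuple("Action", ["bid"])
    states = list(range(168))                 # e.g. hour-of-week states
    actions = [Action(bid=b) for b in (0.5, 1.0, 2.0)]
    sim = Simulator(state_set=states, action_set=actions, modules={})
    sim.reset()
    total_reward = 0.0
    for _ in range(24):                       # run 24 simulated steps
        a = choice(sim.action_set)            # pick any valid action
        r, _info = sim.step(a)
        total_reward += r
    return total_reward, sim.get_history()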
class SimulatorConstRPC(Simulator):
"""
Basic auction simulator with auctions, clicks, revenue per click and cost per click modules.
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical state values.
"""
def __init__(self, state_set, action_set, modules):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
"""
Simulator.__init__(self, state_set, action_set, modules)
assert("auctions" in modules.keys())
assert("clicks" in modules.keys())
assert("rpc" in modules.keys())
assert("cpc" in modules.keys())
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action) and info dict.
:rtype: tuple
"""
assert(a in self.action_set)
N_A = self.modules["auctions"].sample()
N_c = self.modules["clicks"].sample(n=N_A, bid=a.bid)
cpc = self.modules["cpc"].get_cpc(a.bid)
rpc = self.modules["rpc"].get_rpc()
r = (rpc - cpc) * N_c
info = {"revenue": rpc * N_c,
"cost": cpc * N_c,
"num_auction": N_A,
"num_click": N_c}
# History keeping internally
self.internals.update({
"N_A": N_A,
"N_c": N_c,
"cpc": cpc,
"rpc": rpc})
self._next_state()
return r, info
class SimulatorConstRPCHoW(Simulator):
"""
Auction simulator using hours of week (encoded as a value in the range 0-167) as states
and a constant revenue per click for every hour of week.
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical state values.
"""
def __init__(self, state_set, action_set, modules):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
"""
Simulator.__init__(self, state_set, action_set, modules)
assert("auctions" in modules.keys())
assert("clicks" in modules.keys())
assert("rpc" in modules.keys())
assert("cpc" in modules.keys())
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action) and info dict.
:rtype: tuple
"""
assert(a in self.action_set)
# s_ix is assumed to be hour-of-week
# In general: (self.s[s_ix].t) % 168 would do the job (assuming t=0 is HoW=0)
how = self.s_ix
N_A = self.modules["auctions"].sample(how=how)
N_c = self.modules["clicks"].sample(n=N_A, bid=a.bid, how=how)
rpc = self.modules["rpc"].get_rpc(how=how)
cpc = self.modules["cpc"].get_cpc(a.bid, how=how)
r = (rpc - cpc) * N_c
info = {"revenue": rpc * N_c,
"cost": cpc * N_c,
"num_auction": N_A,
"num_click": N_c}
# Hist keeping internally
self.internals.update({
"N_A": N_A,
"N_c": N_c,
"revenue": rpc * N_c,
"rpc": rpc,
"cost": cpc * N_c,
"cpc": cpc})
self._next_state()
return r, info
class SimulatorConversionBasedRevenue(Simulator):
"""
Auction simulator using conversion based revenue (a revenue is based on the number of conversions
sampled from the number of clicks).
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical state values.
"""
def __init__(self, state_set, action_set, modules):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
"""
Simulator.__init__(self, state_set, action_set, modules)
assert("auctions" in modules.keys())
assert("clicks" in modules.keys())
assert("conversions" in modules.keys())
assert("revenue" in modules.keys())
assert("cpc" in modules.keys())
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action) and info dict.
:rtype: tuple
"""
assert(a in self.action_set)
N_A = self.modules["auctions"].sample()
N_c = self.modules["clicks"].sample(n=N_A, bid=a.bid)
N_v = self.modules["conversions"].sample(n=N_c)
revenue = self.modules["revenue"].get_revenue(N_v)
cpc = self.modules["cpc"].get_cpc(a.bid)
r = revenue - cpc * N_c
info = {"revenue": revenue,
"cost": cpc * N_c,
"num_auction": N_A,
"num_click": N_c}
# Hist keeping internally
self.internals.update({
"N_A": N_A,
"N_c": N_c,
"N_v": N_v,
"revenue": revenue,
"rpc": revenue / N_c,
"cost": cpc * N_c,
"cpc": cpc})
self._next_state()
return r, info
class SimulatorConversionBasedRevenueHoW(Simulator):
"""
Auction simulator using hours of week (encoded as a value in the range 0-167) as states
and a conversion based revenue (a revenue is based on the number of conversions sampled
from the number of clicks).
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical state values.
"""
def __init__(self, state_set, action_set, modules):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
"""
Simulator.__init__(self, state_set, action_set, modules)
assert("auctions" in modules.keys())
assert("clicks" in modules.keys())
assert("conversions" in modules.keys())
assert("revenue" in modules.keys())
assert("cpc" in modules.keys())
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action)
and info dict.
:rtype: tuple
"""
assert(a in self.action_set)
how = self.s_ix
N_A = self.modules["auctions"].sample(how=how)
N_c = self.modules["clicks"].sample(n=N_A, bid=a.bid, how=how)
N_v = self.modules["conversions"].sample(num_clicks=N_c, how=how)
revenue = self.modules["revenue"].get_revenue(N_v, how=how)
cpc = self.modules["cpc"].get_cpc(a.bid, how=how)
r = revenue - cpc * N_c
info = {"revenue": revenue,
"cost": cpc * N_c,
"num_auction": N_A,
"num_click": N_c}
# Hist keeping internally
self.internals.update({
"N_A": N_A,
"N_c": N_c,
"N_v": N_v,
"revenue": revenue,
"rpc": 0 if N_c == 0 else revenue / N_c,
"cost": cpc * N_c,
"cpc": cpc})
self._next_state()
return r, info
class SimulatorConversionBasedRevenueDate(Simulator):
"""
Auction simulator using a series of dates in a specified range as states and a conversion based revenue
(a revenue is based on the number of conversions sampled from the number of clicks).
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical state values.
:ivar float income_share: Optimization type: 1.0 - hotel, 0.x - OTA.
"""
def __init__(self, state_set, action_set, modules, income_share=1.0):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:param float income_share: Optimization type: 1.0 - hotel, 0.x - OTA.
"""
Simulator.__init__(self, state_set, action_set, modules)
self.hist = []
self.income_share = income_share
assert("auctions" in modules.keys())
assert("clicks" in modules.keys())
assert("conversions" in modules.keys())
assert("revenue" in modules.keys())
assert("cpc" in modules.keys())
# assert("avg_price" in modules.keys())
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action)
and info dict containing:
* auctions,
* clicks,
* conversions,
* click_probability,
* cvr,
* rpc,
* rpc_is,
* cpc,
* cpc_bid,
* dcpc,
* rpv,
* rpv_is,
* revenue,
* revenue_is,
* cost,
* profit,
* profit_is.
:rtype: tuple
"""
assert(a in self.action_set)
state = self.state_set[self.s_ix]
n_a = self.modules["auctions"].sample(date=state.date)
n_c = self.modules["clicks"].sample(n=n_a, bid=a.bid, date=state.date)
real_cvr = self.modules["conversion_rate"].get_cvr(bid=a.bid, date=state.date)
n_v = self.modules["conversions"].sample(num_clicks=n_c, cvr=real_cvr, date=state.date)
cpc = self.modules["cpc"].get_cpc(a.bid, date=state.date)
revenue = self.modules["revenue"].get_revenue(n_v, date=state.date)
revenue_is = revenue * self.income_share
rpc = revenue / n_c if n_c != 0 else 0.0
rpc_is = rpc * self.income_share
rpv = revenue / n_v if n_v != 0 else 0.0
rpv_is = rpv * self.income_share
cost = cpc * n_c
profit = revenue - cost
profit_is = revenue * self.income_share - cost
reward = profit_is
info = {
"auctions": n_a,
"clicks": n_c,
"conversions": n_v,
"click_probability": n_c / n_a if n_a != 0 else 0,
"cvr": n_v / n_c if n_c != 0 else 0.0,
"rpc": rpc,
"rpc_is": rpc_is,
"cpc": cpc,
"cpc_bid": a.bid,
"dcpc": a.bid - cpc,
"rpv": rpv,
"rpv_is": rpv_is,
"revenue": revenue,
"revenue_is": revenue_is,
"cost": cost,
"profit": profit,
"profit_is": profit_is,
}
if "avg_price" in self.modules.keys():
avg_price = self.modules["avg_price"].get_avg_price(date=state.date)
info["avg_price"] = avg_price
if "average_position" in self.modules.keys():
average_position = self.modules["average_position"].get_average_position(
p=self.modules["click_probability"].get_cp(a.bid, date=state.date),
date=state.date
)
info["average_position"] = average_position
# Hist keeping internally
prior_auctions = self.modules["auctions"].L.loc[self.modules["auctions"].L.date == state.date,
"auctions"].iloc[0]
cp_bid = self.modules["clicks"].p.get_cp(a.bid, state.date)
real_rpv = self.modules["revenue"].models[state.date].last_rpv
real_rpc = real_cvr * real_rpv
real_rpc_is = real_rpc * self.income_share
expected_profit = prior_auctions * cp_bid * (real_cvr * real_rpv - cpc)
expected_profit_is = prior_auctions * cp_bid * (self.income_share * real_cvr * real_rpv - cpc)
internals_update = {
"real_cvr": real_cvr,
"real_rpc": real_rpc,
"real_rpc_is": real_rpc_is,
"real_rpv": real_rpv,
"real_rpv_is": real_rpv * self.income_share,
"expected_profit": expected_profit,
"expected_profit_is": expected_profit_is
}
internals_update.update(info)
self.internals.update(internals_update)
self._next_state()
return reward, info
def get_empty_info(self):
"""
        Returns a default lack-of-activity info dict for this simulator, as returned
        by the step function. Can be used to make proper initializations
        in policies before the first act.
        :return: Dictionary with default lack-of-activity info.
:rtype: dict
"""
info = {
"auctions": 0,
"clicks": 0,
"conversions": 0,
"click_probability": 0.0001,
"cvr": 0.0,
"rpc": 0.0,
"rpc_is": 0.0,
"cpc": 0.0,
"cpc_bid": 0.01,
"dcpc": 0.0,
"rpv": 0.0,
"rpv_is": 0.0,
"revenue": 0.0,
"revenue_is": 0.0,
"cost": 0.0,
"profit": 0.0,
"profit_is": 0.0,
"avg_price": 0.0,
"average_position": 6.0
}
return info
class SimulatorConversionBasedRevenueDateHoW(Simulator):
"""
Auction simulator using dates and hours of week in a specified range as states (dates as strings in the
format yyyy-mm-dd, hour of week as an integer in the range 0-167) and a conversion based revenue
(a revenue is based on the number of conversions sampled from the number of clicks).
:ivar list state_set: List of possible states.
:ivar list action_set: List of valid actions.
:ivar dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:ivar int s_ix: State index.
:ivar dict internals: Internal variable for storing historical state values.
:ivar float income_share: Optimization type: 1.0 - hotel, 0.x - OTA.
"""
def __init__(self, state_set, action_set, modules, income_share=1.0):
"""
:param list state_set: List of possible states.
:param list action_set: List of valid actions.
:param dict modules: Dictionary of modules used to model stochastic variables in the simulator.
:param float income_share: Optimization type: 1.0 - hotel, 0.x - OTA.
"""
Simulator.__init__(self, state_set, action_set, modules)
self.income_share = income_share
def step(self, a):
"""
Performs one step of a simulation returning reward for the given action.
:param Action a: Action to be performed.
:return: Tuple of reward (profit after applying the given action)
and info dict containing:
* auctions,
* clicks,
* conversions,
* click_probability,
* cvr,
* rpc,
* rpc_is,
* cpc,
* cpc_bid,
* dcpc,
* rpv,
* rpv_is,
* revenue,
* revenue_is,
* cost,
* profit,
* profit_is.
:rtype: tuple
"""
assert a in self.action_set
state = self.state_set[self.s_ix]
n_a = self.modules["auctions"].sample(date=state.date, how=state.how)
n_c = self.modules["clicks"].sample(n=n_a, bid=a.bid, date=state.date, how=state.how)
real_cvr = self.modules["conversion_rate"].get_cvr(bid=a.bid, date=state.date, how=state.how)
n_v = self.modules["conversions"].sample(num_clicks=n_c, cvr=real_cvr, date=state.date, how=state.how)
cpc = self.modules["cpc"].get_cpc(a.bid, date=state.date, how=state.how)
revenue = self.modules["revenue"].get_revenue(n_v, date=state.date, how=state.how)
revenue_is = revenue * self.income_share
rpc = revenue / n_c if n_c != 0 else 0.0
rpc_is = rpc * self.income_share
rpv = revenue / n_v if n_v != 0 else 0.0
rpv_is = rpv * self.income_share
cost = cpc * n_c
profit = revenue - cost
profit_is = revenue * self.income_share - cost
reward = profit_is
info = {
"auctions": n_a,
"clicks": n_c,
"conversions": n_v,
"click_probability": n_c / n_a if n_a != 0 else 0,
"cvr": n_v / n_c if n_c != 0 else 0.0,
"rpc": rpc,
"rpc_is": rpc_is,
"cpc": cpc,
"cpc_bid": a.bid,
"dcpc": a.bid - cpc,
"rpv": rpv,
"rpv_is": rpv_is,
"revenue": revenue,
"revenue_is": revenue_is,
"cost": cost,
"profit": profit,
"profit_is": profit_is,
}
if "avg_price" in self.modules.keys():
avg_price = self.modules["avg_price"].get_avg_price(date=state.date, how=state.how)
info["avg_price"] = avg_price
if "average_position" in self.modules.keys():
average_position = self.modules["average_position"].get_average_position(
p=self.modules["click_probability"].get_cp(a.bid, date=state.date, how=state.how),
date=state.date,
how=state.how
)
info["average_position"] = average_position
# Hist keeping internally
prior_auctions = self.modules["auctions"].L.loc[(self.modules["auctions"].L.date == state.date) &
(self.modules["auctions"].L.hour_of_week == state.how),
"auctions"].iloc[0]
cp_bid = self.modules["clicks"].p.get_cp(a.bid, state.date, state.how)
real_rpv = self.modules["revenue"].models["{}.{}".format(state.date, state.how)].last_rpv
real_rpc = real_cvr * real_rpv
real_rpc_is = real_rpc * self.income_share
expected_profit = prior_auctions * cp_bid * (real_cvr * real_rpv - cpc)
expected_profit_is = prior_auctions * cp_bid * (self.income_share * real_cvr * real_rpv - cpc)
internals_update = {
"real_cvr": real_cvr,
"real_rpc": real_rpc,
"real_rpc_is": real_rpc_is,
"real_rpv": real_rpv,
"real_rpv_is": real_rpv * self.income_share,
"expected_profit": expected_profit,
"expected_profit_is": expected_profit_is
}
internals_update.update(info)
self.internals.update(internals_update)
self._next_state()
return reward, info
def get_empty_info(self):
"""
        Returns a default lack-of-activity info dict for this simulator, as returned
        by the step function. Can be used to make proper initializations
        in policies before the first act.
        :return: Dictionary with default lack-of-activity info.
:rtype: dict
"""
info = {
"auctions": 0,
"clicks": 0,
"conversions": 0,
"click_probability": 0.0001,
"cvr": 0.0,
"rpc": 0.0,
"rpc_is": 0.0,
"cpc": 0.0,
"cpc_bid": 0.01,
"dcpc": 0.0,
"rpv": 0.0,
"rpv_is": 0.0,
"revenue": 0.0,
"revenue_is": 0.0,
"cost": 0.0,
"profit": 0.0,
"profit_is": 0.0,
"avg_price": 0.0,
"average_position": 6.0
}
return info
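# Illustrative sketch (not part of the original module): the common act/step loop
# shared by all simulators above. `policy` is a hypothetical object exposing an
# act(info) -> Action method; only the Simulator interface defined in this file
# is assumed.
def _run_episode_example(simulator, policy, n_steps):
    """Run n_steps of any simulator defined above and collect rewards (sketch)."""
    simulator.reset()
    rewards = []
    info = simulator.get_empty_info() if hasattr(simulator, "get_empty_info") else {}
    for _ in range(n_steps):
        a = policy.act(info)          # hypothetical policy interface
        r, info = simulator.step(a)   # reward plus per-step diagnostics
        rewards.append(r)
    return rewards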
if __name__ == "__main__":
import unittest
class TestSimulatorConstRPC(unittest.TestCase):
def test_sanity(self):
print("----------------------------------------")
print("SimulatorConstRPC sample run")
from ssa_sim_v2.simulator.modules.auctions.auctions_base import AuctionsPoisson
from ssa_sim_v2.simulator.modules.clicks.clicks_base import ClicksBinomialClickProbFunction
from ssa_sim_v2.simulator.modules.cpc.cpc_base import CPCFirstPrice
from ssa_sim_v2.simulator.modules.rpc.rpc_base import RPCUniform
hist_keys = ["s", "a", "r", "env"]
S = namedtuple("State", ["t"])
A = namedtuple("Action", ["bid"])
N = 10
Sset = [S(t) for t in range(5)]
Aset = [A(b) for b in range(10)]
# Load data from csv to find fitted parameter from data
mods = {"auctions": AuctionsPoisson(L=100),
"clicks": ClicksBinomialClickProbFunction((lambda b: 0.02)),
"cpc": CPCFirstPrice(),
"rpc": RPCUniform(low=10, high=100)}
E = SimulatorConstRPC(Sset, Aset, mods)
E.reset()
s = E.state()
hist = []
for n in range(N):
a = choice(Aset)
r, info = E.step(a)
s2 = E.state()
# Learning
# Hist-keeping
h = {}
for k in hist_keys:
if k == "s":
h[k] = s
if k == "a":
h[k] = a
if k == "r":
h[k] = r
if k == "env":
h[k] = E.get_history()
hist.append(h)
s = s2
for h in hist:
print(h)
print("")
self.assertTrue(True)
print("")
class TestSimulatorConstRPCHoW(unittest.TestCase):
def test_sanity(self):
print("----------------------------------------")
print("SimulatorConstRPCHoW sample run")
from ssa_sim_v2.simulator.modules.auctions.auctions_how import AuctionsPoissonHoW
from ssa_sim_v2.simulator.modules.click_probability.click_probability_how import ClickProbabilityLogisticLogHoW
from ssa_sim_v2.simulator.modules.clicks.clicks_how import ClicksBinomialClickProbModelHoW
from ssa_sim_v2.simulator.modules.cpc.cpc_how import CPCBidMinusCpcDiffHoW
from ssa_sim_v2.simulator.modules.rpc.rpc_how import RPCHistoricalAvgHoW
hist_keys = ["s", "a", "r", "env"]
S = namedtuple("State", ["t"])
A = namedtuple("Action", ["bid"])
Ssize = 168
Asize = 5001
Sset = [S(t) for t in range(Ssize)]
Aset = [A(round(float(b) / 100, 2)) for b in range(Asize)]
# Initialize auctions prior
auctions = np.random.exponential(100, size=168)
# Initialize clicks prior
pc_init = np.random.uniform(low=0.0, high=0.5, size=168)
bids_init = np.random.uniform(low=0.0, high=20.0, size=168)
click_prob_model = ClickProbabilityLogisticLogHoW(pc_init, bids_init)
# Initialize rpc prior
mu_rpc = np.random.uniform(low=10.0, high=50.0, size=168)
# Initialize cpc prior
avg_bids = np.random.uniform(high=5.0, size=168)
avg_cpcs = np.random.uniform(high=avg_bids)
# Module setup for env
mods = {"auctions": AuctionsPoissonHoW(L=auctions),
"clicks": ClicksBinomialClickProbModelHoW(click_prob_model),
"rpc": RPCHistoricalAvgHoW(mu_rpc),
"cpc": CPCBidMinusCpcDiffHoW(avg_bids, avg_cpcs)}
E = SimulatorConstRPCHoW(Sset, Aset, mods)
E.reset()
s = E.state()
hist = []
N = 168
for n in range(N):
a = choice(Aset)
r, info = E.step(a)
s2 = E.state()
# Learning
# Hist-keeping
h = {}
for k in hist_keys:
if k == "s":
h[k] = s
if k == "a":
h[k] = a
if k == "r":
h[k] = r
if k == "env":
h[k] = E.get_history()
hist.append(h)
s = s2
for h in hist:
print(h)
print("")
self.assertTrue(True)
print("")
class TestSimulatorConversionBasedRevenueHoW(unittest.TestCase):
def test_sanity(self):
print("----------------------------------------")
print("SimulatorConversionBasedRevenueHoW sample run")
from ssa_sim_v2.simulator.modules.auctions.auctions_how import AuctionsPoissonHoW
from ssa_sim_v2.simulator.modules.click_probability.click_probability_how import ClickProbabilityLogisticLogHoW
from ssa_sim_v2.simulator.modules.clicks.clicks_how import ClicksBinomialClickProbModelHoW
from ssa_sim_v2.simulator.modules.conversions.conversions_how import ConversionsBinomialHoW
from ssa_sim_v2.simulator.modules.revenue.revenue_how import RevenueConversionBasedHoW
from ssa_sim_v2.simulator.modules.cpc.cpc_how import CPCBidMinusCpcDiffHoW
hist_keys = ["s", "a", "r", "env"]
S = namedtuple("State", ["t"])
A = namedtuple("Action", ["bid"])
Ssize = 168
Asize = 5001
Sset = [S(t) for t in range(Ssize)]
Aset = [A(round(float(b) / 100, 2)) for b in range(Asize)]
# Initialize auctions prior
auctions = np.random.exponential(100, size=168)
# Initialize clicks prior
pc_init = np.random.uniform(low=0.0, high=0.5, size=168)
bids_init = np.random.uniform(low=0.0, high=20.0, size=168)
click_prob_model = ClickProbabilityLogisticLogHoW(pc_init, bids_init)
# Initialize conversions prior
pv_init = np.random.uniform(low=0.001, high=0.02, size=168)
# Initialize revenue prior
avg_rpv = np.random.uniform(low=1000.0, high=4000.0, size=168)
# Initialize cpc prior
avg_bids = np.random.uniform(high=5.0, size=168)
avg_cpcs = np.random.uniform(high=avg_bids)
# Module setup for env
mods = {"auctions": AuctionsPoissonHoW(L=auctions),
"clicks": ClicksBinomialClickProbModelHoW(click_prob_model),
"conversions": ConversionsBinomialHoW(pv_init),
"revenue": RevenueConversionBasedHoW(avg_rpv),
"cpc": CPCBidMinusCpcDiffHoW(avg_bids, avg_cpcs)}
E = SimulatorConversionBasedRevenueHoW(Sset, Aset, mods)
E.reset()
s = E.state()
hist = []
N = 168
for n in range(N):
a = choice(Aset)
r, info = E.step(a)
s2 = E.state()
# Learning
# Hist-keeping
h = {}
for k in hist_keys:
if k == "s":
h[k] = s
if k == "a":
h[k] = a
if k == "r":
h[k] = r
if k == "env":
h[k] = E.get_history()
hist.append(h)
s = s2
for h in hist:
print(h)
print("")
self.assertTrue(True)
print("")
class TestSimulatorConversionBasedRevenueDateHoW(unittest.TestCase):
def setUp(self):
from ssa_sim_v2.simulator.modules.auctions.auctions_date_how import AuctionsPoissonDateHoW
from ssa_sim_v2.simulator.modules.click_probability.click_probability_date_how import ClickProbabilityLogisticLogDateHoW
from ssa_sim_v2.simulator.modules.clicks.clicks_date_how import ClicksBinomialClickProbModelDateHoW
from ssa_sim_v2.simulator.modules.cpc.cpc_date_how import CPCBidHistoricalAvgCPCDateHoW
from ssa_sim_v2.simulator.modules.conversions.conversions_date_how import ConversionsBinomialDateHoW
from ssa_sim_v2.simulator.modules.revenue.revenue_date_how import RevenueConversionBasedDateHoW
self.S = namedtuple("State", ["date", "how"])
self.A = namedtuple("Action", ["bid"])
self.tmp_df = pd.DataFrame(np.array(range(24)), columns=["hour_of_day"])
self.tmp_df["key"] = 1
self.dates = pd.DataFrame(pd.date_range('2016-01-01', '2016-01-02'), columns=["date"])
self.dates["key"] = 1
        self.dates = pd.merge(self.dates, self.tmp_df, on=["key"], how="left")
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 24 15:37:55 2021
@author: Gary
"""
import pandas as pd
import numpy as np
import build_common
trans_dir = build_common.get_transformed_dir()
lower_tolerance = 95
upper_tolerance = 105
density_min = 6.0
density_max = 13.0
# Normally set to True
remove_dropped_keys = True
class Carrier_ID():
def __init__(self,input_df,data_source='bulk'):
self.remove_dropped_keys = remove_dropped_keys
if not self.remove_dropped_keys:
print(' -- Not removing dropped keys from carrier sets')
self.df = input_df
self.in_upk = self.df.UploadKey
self.in_ik = self.df.IngredientKey
self.data_source = data_source
self.auto_fn = trans_dir+f'{data_source}/carrier_list_auto.csv'
self.curdf_fn = trans_dir+f'{data_source}/carrier_list_curated.csv'
self.probdf_fn = trans_dir+f'{data_source}/carrier_list_prob.csv'
# list of single purpose lables for carriers
self.wlst = ['carrier / base fluid', 'carrier/base fluid', 'carrier fluid',
'carrier','base fluid','base carrier fluid','carrier/base flud',
'base fluid / carrier','carrier/ base fluid','base/carrier fluid',
'carrier base fluid','water','base fluid ',' carrier / base fluid ',
'base fluid & mix water', 'base fluid & mix water,', 'fresh water',
'carrier/base fluid ', 'treatment carrier', 'carrier/basefluid',
'carrying agent', 'base / carrier fluid', 'carrier / base fluid - water',
'carrier fluid ', 'base frac fluid', 'water',
'water / produced water', 'carrier ', 'base carrier',
'fracture fluid', 'frac base fluid']
self.proppants = ['14808-60-7','1302-93-8','1318-16-7','1302-74-5','1344-28-1','14464-46-1','7631-86-9','1302-76-7']
self.gasses = ['7727-37-9','124-38-9']
self.merge_bgCAS()
self.make_MI_fields()
self.make_percent_sums()
self.fetch_carrier_lists()
self.check_for_prob_disc()
self.check_for_auto_disc()
self.check_auto_against_list()
self.save_curation_candidates()
def check_for_removed_keys(self,ref_df,do_IngKey=True):
"""When saved IngredientKeys are missing in new data sets, we drop the
associated disclosures from the curated list. This forces a new evaluation
of those disclosures in case they have been changed."""
if self.remove_dropped_keys:
testupk = pd.merge(self.in_upk,ref_df[['UploadKey']],
on='UploadKey',how='outer',indicator=True)
#print(testupk[testupk['_merge']=='right_only'])
dropkeys = testupk[testupk['_merge']=='right_only'].UploadKey.tolist()
if len(dropkeys)>0:
print(f' ** Dropping {len(dropkeys)} carriers because UploadKeys are missing in latest data')
ref_df = ref_df[~(ref_df.UploadKey.isin(dropkeys))]
#print(testupk.head(10))
if do_IngKey:
testik = pd.merge(self.in_ik,ref_df[['IngredientKey']],
on='IngredientKey',how='outer',indicator=True)
#print(testik[testik['_merge']=='right_only'])
dropkeys = testik[testik['_merge']=='right_only'].IngredientKey.tolist()
if len(dropkeys)>0:
print(f' ** Dropping {len(dropkeys)} carriers because IngredientKeys are missing in latest data')
ref_df = ref_df[~(ref_df.IngredientKey.isin(dropkeys))]
return ref_df
def fetch_carrier_lists(self):
print(' -- loading auto-detected records')
self.autodf = pd.read_csv(self.auto_fn,low_memory=False,
quotechar='$',encoding='utf-8')
self.autodf['is_new'] = False
self.autodf = self.check_for_removed_keys(self.autodf)
self.remove_disclosures(self.autodf)
print(' -- loading curation-detected records')
self.curdf = pd.read_csv(self.curdf_fn,low_memory=False,
quotechar='$',encoding='utf-8')
self.curdf['is_new'] = False
self.curdf = self.check_for_removed_keys(self.curdf)
self.remove_disclosures(self.curdf)
print(' -- loading problem records')
self.probdf = pd.read_csv(self.probdf_fn,low_memory=False,
quotechar='$',
encoding='utf-8')
self.probdf['is_new'] = False
self.probdf = self.check_for_removed_keys(self.probdf,do_IngKey=False)
self.remove_disclosures(self.probdf)
def merge_bgCAS(self):
#casing = pd.read_csv('./sources/casing_curate_master.csv',
casing = pd.read_csv(trans_dir+'casing_curated.csv',
quotechar='$',encoding='utf-8')
casing['is_valid_CAS'] = casing.bgCAS.str[0].isin(['0','1','2','3','4',
'5','6','7','8','9'])
self.df = pd.merge(self.df,casing[['CASNumber','IngredientName',
'bgCAS','is_valid_CAS']],
on=['CASNumber','IngredientName'],how='left')
self.df.is_valid_CAS.fillna(False,inplace=True)
def make_MI_fields(self):
# remove records that are more likely unreliable: when MI is small
cond = (self.df.MassIngredient>2)&(self.df.PercentHFJob>0)
t = self.df[cond][['MassIngredient','PercentHFJob','UploadKey']].copy()
# make a simple ratio of MI to %HFJ. If everything is consistent, this
# ratio should essentially be the same for all records in a disclosure
t['permassratio'] = t.MassIngredient/t.PercentHFJob
gb = t.groupby('UploadKey',as_index=False)['permassratio'].agg(['min','max']).reset_index()
gb.columns = ['UploadKey','small','big']
gb['rat_dev'] = (gb.big-gb.small)/gb.big
# set MIok to true if the range within a disclosure is less than 10%
# MIok is a disclosure level flag.
gb['MIok'] = gb.rat_dev<.1
print(f'Creating MIok: Number disclosures with MI: {len(gb)}, out of tolerance: {len(gb[gb.rat_dev>0.1])}')
self.df = pd.merge(self.df,gb[['UploadKey','MIok']],on='UploadKey',how='left')
self.df.MIok = np.where(~cond,False,self.df.MIok)
cond2 = (self.df.MassIngredient>5)&(self.df.TotalBaseWaterVolume>10000)&self.df.MIok
self.df['dens_test'] = np.where(cond2,
self.df.MassIngredient/self.df.TotalBaseWaterVolume,
np.NaN)
# density can be within pretty wide range; will check again at
c1 = self.df.dens_test>density_min
c2 = self.df.dens_test<density_max
self.df['maybe_water_by_MI']=np.where(c1&c2,'yes','no')
self.df.maybe_water_by_MI = np.where(self.df.dens_test.isna(),
'not testable',
self.df.maybe_water_by_MI)
def make_percent_sums(self):
gball = self.df.groupby('UploadKey',as_index=False)[['PercentHFJob',
'is_valid_CAS']].sum()
gball['has_no_percHF'] = ~(gball.PercentHFJob>0)
gball['has_no_valid_CAS'] = ~(gball.is_valid_CAS>0)
gbmax = self.df.groupby('UploadKey',as_index=False)[['PercentHFJob',
'TotalBaseWaterVolume']].max()
gbmax.columns = ['UploadKey','PercMax','TBWV']
gball = pd.merge(gball,gbmax,on='UploadKey',how='left')
cond = self.df.PercentHFJob>0
gbw = self.df[cond].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbw.columns = ['UploadKey','percSumAll']
gbwo = self.df[cond&self.df.is_valid_CAS].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbwo.columns = ['UploadKey','percSumValid']
gbwoSA = self.df[cond&(~(self.df.bgCAS=='sysAppMeta'))].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbwoSA.columns = ['UploadKey','percNoSysApp']
mg = pd.merge(gball,gbw,on=['UploadKey'],how='left')
mg = pd.merge(mg,gbwo,on='UploadKey',how='left')
        mg = pd.merge(mg,gbwoSA,on='UploadKey',how='left')
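# Illustrative sketch (not part of the original FracFocus pipeline): the MIok flag
# above marks a disclosure as consistent when MassIngredient/PercentHFJob is nearly
# constant across its records. A toy version of that check:
def _mi_consistency_example():
    toy = pd.DataFrame({'UploadKey': ['a', 'a', 'b', 'b'],
                        'MassIngredient': [100.0, 200.0, 100.0, 500.0],
                        'PercentHFJob': [1.0, 2.0, 1.0, 1.0]})
    toy['permassratio'] = toy.MassIngredient / toy.PercentHFJob
    gb = toy.groupby('UploadKey')['permassratio'].agg(['min', 'max'])
    gb['rat_dev'] = (gb['max'] - gb['min']) / gb['max']
    return gb['rat_dev'] < 0.1   # disclosure 'a' is consistent, 'b' is not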
from .entities.entity import Session, engine, Base
from .entities.project import Project, ProjectSchema
from .entities.pbutton import PButton, PButtonSchema
from .entities.bookmark import Bookmark, BookmarkSchema
from .entities.layout import Layout, LayoutSchema
from .entities.layoutcolumn import LayoutColumn, LayoutColumnSchema
from os import walk
import os
from os.path import join
from datetime import datetime
from flask import send_from_directory
import numpy as np
import pandas as pd
import sqlite3
import yape
import logging
import traceback
# note: getting an error: ImportError: Python is not installed as a framework. The Mac OS X backend...
# see to fix it:
# https://stackoverflow.com/questions/49367013/pipenv-install-matplotlib
from subprocess import call
UPLOAD_FOLDER = "/Users/kazamatzuri/work/temp/yape-data"
class ProjectManager:
class __ProjectManager:
def __str__(self):
return repr(self)
instance = None
def __init__(self):
if not ProjectManager.instance:
ProjectManager.instance = ProjectManager.__ProjectManager()
def __getattr__(self, name):
return getattr(self.instance, name)
@staticmethod
def addBookmark(data):
print(data)
bm = Bookmark(data=data, created_by="user")
session = Session()
session.add(bm)
session.commit()
newbm = BookmarkSchema().dump(bm)
session.close()
return newbm
@staticmethod
def getProject(projectId):
session = Session()
project_object = session.query(Project).get(projectId)
if project_object == None:
session.close()
return None
schema = ProjectSchema()
project = schema.dump(project_object)
session.close()
return project
@staticmethod
def getBookmark(bmid):
session = Session()
bookmark = session.query(Bookmark).get(bmid)
if bookmark == None:
session.close()
return None
print(bookmark)
schema = BookmarkSchema()
bmi = schema.dump(bookmark)
session.close()
return bmi
@staticmethod
def getLayout(id=None):
session = Session()
if id == None:
layouts = session.query(Layout)
if layouts.count() == 0:
session.close()
return []
schema = LayoutSchema(many=True)
lys = schema.dump(layouts)
session.close()
return lys
else:
layout = session.query(Layout).get(id)
if layout == None:
session.close()
return None
print(layout)
schema = LayoutSchema()
l = schema.dump(layout)
session.close()
return l
@staticmethod
def saveLayout(data):
print(data["name"])
session = Session()
cl = session.query(Layout).filter(Layout.name == data["name"])
if cl.count() == 0:
cl = Layout(name=data["name"], created_by="user")
for c in data["cols"]:
cl.columns.append(LayoutColumn(cl, c))
session.add(cl)
session.commit()
cl = LayoutSchema().dump(cl)
session.close()
return cl
else:
session.close()
return None
@staticmethod
def getBookmarks():
session = Session()
bookmarks = session.query(Bookmark)
if bookmarks == None:
session.close()
return []
schema = BookmarkSchema(many=True)
bmi = schema.dump(bookmarks)
session.close()
return bmi
@staticmethod
def getProjectBookmarks(prid):
session = Session()
bookmarks = session.query(Bookmark).filter(Bookmark.project == prid)
if bookmarks == None:
session.close()
return []
schema = BookmarkSchema(many=True)
bmi = schema.dump(bookmarks)
session.close()
return bmi
@staticmethod
def check_data(db, name):
cur = db.cursor()
data = True
try:
cur.execute("SELECT * FROM " + name + " LIMIT 2")
if len(cur.fetchall()) < 2:
data = False
except:
data = False
pass
return data
@staticmethod
def generateGraphs(id):
session = Session()
pb = session.query(PButton).get(id)
dir = pb.graphdir
if dir is None or dir == "":
dir = pb.filename.split(".")[0]
pb.ran_last = datetime.now()
pb.graphdir = dir
# pb.save()
session.commit()
bdir = join(UPLOAD_FOLDER, dir)
print(bdir)
f = join(bdir, pb.filename)
print(f)
# call(["yape","-q","-a","-o",dir,f])
session.close()
return
@staticmethod
def serveImg(id, url):
session = Session()
pb = session.query(PButton).get(id)
dir = join(UPLOAD_FOLDER, pb.graphdir)
session.close()
return send_from_directory(dir, url)
@staticmethod
def createDB(id):
# print("createdb "+id)
session = Session()
pb = session.query(PButton).get(id)
dir = pb.graphdir
if dir is None or dir == "":
dir = pb.filename.split(".")[0]
pb.graphdir = dir
dir = join(UPLOAD_FOLDER, pb.graphdir)
filedb = join(dir, "data.db")
pb.database = filedb
bdir = join(UPLOAD_FOLDER, dir)
f = join(bdir, pb.filename)
print("before try")
try:
os.remove(filedb)
except OSError:
pass
# call(["yape","-q","--filedb",filedb,f])
params = ["--filedb", filedb, f]
# logging.debug(params)
print(filedb)
args = yape.main.parse_args(params)
try:
yape.main.yape2(args)
except:
logging.debug("error while parsing:" + f)
logging.debug(traceback.format_exc())
print(traceback.format_exc())
session.commit()
session.close()
@staticmethod
def assertDB(id):
session = Session()
pb = session.query(PButton).get(id)
filedb = pb.database
if filedb is None or filedb == "":
ProjectManager.createDB(id)
session.close()
return {}
@staticmethod
def getTextFields(id):
session = Session()
pb = session.query(PButton).get(id)
filedb = pb.database
session.close()
db = sqlite3.connect(filedb)
cur = db.cursor()
list = [
"license",
"cpffile",
"ss1",
"ss2",
"cstatc11",
"cstatc12",
"cstatc13",
"cstatc14",
"cstatD1",
"cstatD2",
"cstatD3",
"cstatD4",
"cstatD5",
"cstatD6",
"cstatD7",
"cstatD8",
"windowsinfo",
"linuxinfo",
"tasklist",
"cpu",
"df-m",
"fdisk-l",
"ifconfig",
"ipcs",
"mount",
"pselfy1",
"pselfy2",
"pselfy3",
"pselfy4",
"sysctl-a",
]
data = {}
for field in list:
if ProjectManager.check_data(db, field):
cur.execute("select * from " + field)
data[field] = cur.fetchall()
cur.close()
return data
@staticmethod
def getFields(id):
session = Session()
ProjectManager.assertDB(id)
pb = session.query(PButton).get(id)
filedb = pb.database
session.close()
db = sqlite3.connect(filedb)
cursor = db.execute("select * from mgstat")
names = [description[0] for description in cursor.description]
return names
@staticmethod
def getDescription(id):
session = Session()
ProjectManager.assertDB(id)
pb = session.query(PButton).get(id)
filedb = pb.database
session.close()
db = sqlite3.connect(filedb)
list = ["mgstat", "perfmon", "iostat", "vmstat", "sard", "sar-u"]
data = {}
for i in list:
if ProjectManager.check_data(db, i):
cursor = db.execute("select * from " + i)
names = [description[0] for description in cursor.description]
if "datetime" in names:
names.remove("datetime")
data[i] = names
return data
@staticmethod
def getData(id, fields):
session = Session()
ProjectManager.assertDB(id)
pb = session.query(PButton).get(id)
filedb = pb.database
session.close()
db = sqlite3.connect(filedb)
print(fields)
query = "select datetime,Glorefs from mgstat"
if fields is not None:
query = "select datetime"
for i in fields:
query += "," + i
query += " from mgstat"
        df = pd.read_sql_query(query, db)
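# Usage sketch (not part of the original module): ProjectManager wraps a shared
# inner instance, so repeated constructions reuse the same state and the static
# methods can be called without holding a reference.
def _project_manager_example():
    pm1, pm2 = ProjectManager(), ProjectManager()
    assert pm1.instance is pm2.instance       # both wrappers share the singleton
    return ProjectManager.getBookmarks()      # static API needs no instance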
# hackathon T - Hacks 3.0
# flask backend of data-cleaning website
import matplotlib.pyplot as plt
#import tensorflow as tf
#from tensorflow.keras import layers
import pandas as pd
import numpy as np
from flask import *
import os
from datetime import *
from subprocess import Popen, PIPE
from math import floor
import converter as con
from flask_ngrok import run_with_ngrok
from meanShift import Mean_Shift
from matplotlib import style
#import seaborn as sns
style.use('ggplot')
from sklearn.model_selection import train_test_split
from datetime import datetime
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
colors = 10*['g', 'r', 'b', 'c', 'k']
from pyparsing import (
Literal,
Word,
Group,
Forward,
alphas,
alphanums,
Regex,
ParseException,
CaselessKeyword,
Suppress,
delimitedList,
)
import math
import operator
exprStack = []
def push_first(toks):
exprStack.append(toks[0])
def push_unary_minus(toks):
for t in toks:
if t == "-":
exprStack.append("unary -")
else:
break
bnf = None
def BNF():
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
global bnf
if not bnf:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums + "_$")
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# add parse action that replaces the function identifier with a (name, number of args) tuple
def insert_fn_argcount_tuple(t):
fn = t.pop(0)
num_args = len(t[0])
t.insert(0, (fn, num_args))
fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
insert_fn_argcount_tuple
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(push_first)[...]
term = factor + (multop + factor).setParseAction(push_first)[...]
expr <<= term + (addop + term).setParseAction(push_first)[...]
bnf = expr
return bnf
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": int,
"round": round,
"sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
# functionsl with multiple arguments
"multiply": lambda a, b: a * b,
"hypot": math.hypot,
# functions with a variable number of arguments
"all": lambda *a: all(a),
}
def evaluate_stack(s):
op, num_args = s.pop(), 0
if isinstance(op, tuple):
op, num_args = op
if op == "unary -":
return -evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = evaluate_stack(s)
op1 = evaluate_stack(s)
return opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in fn:
# note: args are pushed onto the stack in reverse order
args = reversed([evaluate_stack(s) for _ in range(num_args)])
return fn[op](*args)
elif op[0].isalpha():
raise Exception("invalid identifier '%s'" % op)
else:
# try to evaluate as int first, then as float if int fails
try:
return int(op)
except ValueError:
return float(op)
def test(s):
val = "NA"
exprStack[:] = []
try:
results = BNF().parseString(s, parseAll=True)
val = evaluate_stack(exprStack[:])
except ParseException as pe:
print(s, "failed parse:", str(pe))
except Exception as e:
print(s, "failed eval:", str(e), exprStack)
return val
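# Quick illustration of the parser above (not part of the original code):
# test("9 + 3 * 2") evaluates to 15, test("sin(PI/2)") to 1.0, exponents
# associate right-to-left so test("2^3^2") gives 512, and an unknown
# identifier returns the "NA" sentinel.
def _parser_examples():
    return [test("9 + 3 * 2"), test("sin(PI/2)"), test("2^3^2"), test("unknown_var + 1")]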
def feature_pie(filename, feature1, feature2, class_size = 10):
df = pd.read_csv(filename)
sums = df.groupby(df[feature1])[feature2].sum()
plt.axis('equal')
plt.pie(sums, labels=sums.index, autopct='%1.1f%%', shadow=True, startangle=140)
plt.title("Pie chart on basis of "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
def feature_scatter(filename, feature1, feature2):
    df = pd.read_csv(filename)
    # a scatter plot needs the two column values, not a pie-chart call
    plt.scatter(df[feature1], df[feature2])
    plt.xlabel(feature1)
    plt.ylabel(feature2)
    plt.title("Scatter plot between "+feature1+" and "+feature2)
    name = filename.split('.')
    plt.savefig(name[0]+".png")
    plt.close()
def new_feature(filename, com, name):
df = pd.read_csv(filename)
com = com.split(',')
formula = "_"
temp = "_"
for i, c in enumerate(com):
if c == "formula":
formula = com[i+1]
temp = formula
vals = []
i = 0
print(name)
if name != " ":
i = 1
n = len(df)
for j in range(n):
for k, c in enumerate(com):
if k%2 == 0:
if c == "formula":
break
formula = formula.replace(c, str(df.at[j, com[k+1]]))
vals.append(test(formula))
formula = temp
col = len(df.axes[1])
print(vals)
df[name] = vals
"""
if name != " ":
df.insert(col, vals, True)
else:
df.insert(col, vals, True)
"""
del df['Unnamed: 0']
os.remove(filename)
df.to_csv(filename)
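# Usage sketch for new_feature (illustrative; the file name and columns are made
# up): the `com` string pairs each placeholder with a column name and ends with
# the literal word "formula" followed by the expression to evaluate per row.
def _new_feature_example(tmp_csv="_toy_bmi.csv"):
    toy = pd.DataFrame({"Weight": [70, 80], "Height": [1.75, 1.80]})
    toy.to_csv(tmp_csv)
    new_feature(tmp_csv, "w,Weight,h,Height,formula,w/(h^2)", "BMI")
    return pd.read_csv(tmp_csv)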
def disp(filename):
    df = pd.read_csv(filename)
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
# Disclaimer: This code can be found here: https://github.com/aws/sagemaker-training-toolkit/blob/master/test/unit/test_encoder.py
#
import json
# import numpy as np
import pandas as pd
import pytest
from mldock.platform_helpers.mldock.inference.content_decoders import (
pandas as pandas_decoders,
)
class TestPandasDecoders:
"""Tests the pandas decoder methods"""
@staticmethod
@pytest.mark.parametrize(
"target, expected",
[
({"col": [42, 6, 9]}, pd.DataFrame([42, 6, 9], columns=["col"])),
(
{"col": [42.0, 6.0, 9.0]},
pd.DataFrame([42.0, 6.0, 9.0], columns=["col"]),
),
(
{"col": ["42", "6", "9"]},
pd.DataFrame(["42", "6", "9"], columns=["col"]),
),
(
{"col": [u"42", u"6", u"9"]},
pd.DataFrame([u"42", u"6", u"9"], columns=["col"]),
),
],
)
def test_json_list_to_pandas(target, expected):
"""test json data is correctly decoded to pandas dataframe"""
actual = pandas_decoders.json_list_to_pandas(json.dumps(target))
pd.testing.assert_frame_equal(actual, expected)
@staticmethod
@pytest.mark.parametrize(
"target, expected",
[
(b"col\n42\n6\n9\n", pd.DataFrame([42, 6, 9], columns=["col"])),
(b"col\n42.0\n6.0\n9.0\n", pd.DataFrame([42.0, 6.0, 9.0], columns=["col"])),
(b"col\n42\n6\n9\n", pd.DataFrame([42, 6, 9], columns=["col"])),
(
b'col\n"False,"\n"True."\n"False,"\n',
pd.DataFrame(["False,", "True.", "False,"], columns=["col"]),
),
(
b'col\naaa\n"b""bb"\nccc\n',
pd.DataFrame(["aaa", 'b"bb', "ccc"], columns=["col"]),
),
(b'col\n"a\nb"\nc\n', pd.DataFrame(["a\nb", "c"], columns=["col"])),
],
)
def test_csv_to_pandas(target, expected):
"""test csv data is correctly decoded to pandas dataframe"""
actual = pandas_decoders.csv_to_pandas(target)
        pd.testing.assert_frame_equal(actual, expected)
from nsepy import get_history
from nsepy import get_index_pe_history
from datetime import date
import pandas as pd
from pandas import Series, DataFrame
# Fiscal-year-end trading dates shared by the index and P/E history queries
FY_END_DATES = [date(2009, 3, 31), date(2010, 3, 31), date(2011, 3, 31),
                date(2012, 3, 30), date(2013, 3, 28), date(2014, 3, 31),
                date(2015, 3, 31), date(2016, 3, 31), date(2017, 3, 31),
                date(2018, 3, 28), date(2019, 3, 29), date(2020, 3, 31)]
def indexhistory(indexsymbol):
    # one-day snapshot of the index on each fiscal-year-end date
    frames = [get_history(symbol=indexsymbol, start=d, end=d, index=True)
              for d in FY_END_DATES]
    index_history = pd.concat(frames)
    print(index_history)
    return index_history
def PEhistory(indexsymbol):
    # one-day snapshot of the index P/E on each fiscal-year-end date
    frames = [get_index_pe_history(symbol=indexsymbol, start=d, end=d)
              for d in FY_END_DATES]
    pe_history = pd.concat(frames)
    print(pe_history)
    return pe_history
pe_history = PEhistory("NIFTY ENERGY")
index_history = indexhistory("NIFTY ENERGY")
pe_analysis = pd.merge(pe_history, index_history, on='Date')
earnings = (pe_analysis['Close'] / pe_analysis['P/E']).rename("Earnings")
earnings = pd.DataFrame(earnings)
pe_analysis = pd.merge(pe_analysis, earnings, on='Date')
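# Illustrative extension (not in the original script): year-on-year growth of the
# implied index earnings (Close / P/E) computed above.
earnings_growth = pe_analysis['Earnings'].pct_change().mul(100).round(2).rename('Earnings_YoY_%')
pe_analysis = pd.concat([pe_analysis, earnings_growth], axis=1)
print(pe_analysis[['Close', 'P/E', 'Earnings', 'Earnings_YoY_%']])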
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 11:55:00 2019
@author: github.com/sahandv
"""
import sys
import time
import gc
import collections
import json
import re
import os
import pprint
from random import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering, AffinityPropagation
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from yellowbrick.cluster import KElbowVisualizer
import scipy.cluster.hierarchy as sch
from scipy import spatial,sparse,sign
from bokeh.io import push_notebook, show, output_notebook, output_file
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, LabelSet
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
import fasttext
from gensim.models import FastText as fasttext_gensim
from gensim.test.utils import get_tmpfile
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
stop_words = set(stopwords.words("english"))
from sciosci.assets import keyword_assets as kw
from sciosci.assets import generic_assets as sci
from sciosci.assets import advanced_assets as aa
period_cluster = '2017-2018 13'
# Read cluster centers
cluster_centers = pd.read_csv('/home/sahand/GoogleDrive/Data/FastText doc clusters - SIP/50D/cluster_centers/agglomerative ward '+period_cluster,index_col=0)
# Read and make keyword list
keywords = pd.read_csv('/home/sahand/GoogleDrive/Data/Author keywords - 29 Oct 2019/1990-2018 keyword frequency',names=['keyword','frequency'])
keywords = keywords[keywords['frequency']>20]
keywords_list = keywords['keyword'].values.tolist()
# Get keyword embeddings
gensim_model_address = '/home/sahand/GoogleDrive/Data/FastText Models/50D/fasttext-scopus_wos-merged-310k_docs-gensim 50D.model'
model = fasttext_gensim.load(gensim_model_address)
# Save in a list
keyword_vectors = []
for token in tqdm(keywords_list[:],total=len(keywords_list[:])):
keyword_vectors.append(model.wv[token])
# Cosine distance of the cluster centers and keywords to find the closest keywords to clusters
names = []
names.append('clusters')
sim_A_to_B = []
for idx_A,vector_A in cluster_centers.iterrows():
inner_similarity_scores = []
inner_similarity_scores.append(idx_A)
for idx_B,vector_B in enumerate(keyword_vectors):
distance_tmp = spatial.distance.cosine(vector_A.values, vector_B)
        similarity_tmp = distance_tmp  # note: the cosine *distance* is kept here (lower = closer); use 1 - distance_tmp for a true similarity
# inner_similarity_scores.append(keywords_list[idx_B])
inner_similarity_scores.append(similarity_tmp)
if idx_A == 0:
# names.append('keyword_'+str(idx_B))
names.append(keywords_list[idx_B])
sim_A_to_B.append(inner_similarity_scores)
# print('cluster of A:',idx_A,'to cluster of B:',idx_B,'similarity',similarity_tmp)
sim_A_to_B = pd.DataFrame(sim_A_to_B,columns=names)
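# Illustrative follow-up (not in the original script): for each cluster, list the
# keywords with the smallest cosine distance, i.e. the closest terms.
def closest_keywords(sim_df, n_top=10):
    out = {}
    for _, row in sim_df.iterrows():
        cluster_id = row['clusters']
        distances = row.drop('clusters').astype(float)
        out[cluster_id] = distances.nsmallest(n_top).index.tolist()
    return out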
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 26 14:23:09 2022
@author: <NAME>
This module aims to reproduce the coefficient of variation (CV) report
generated by MetroloJ, an ImageJ plugin.
Given a .tif file, this module will produce the following elements:
- original images with rois (region of interests) marked on them.
- microscopy info dataframe
- rois' histogram ploting the number of pixels per gray intensity value
for all the images
- dataframe enclosing info about the roi's pixels with significant
intensities.
Note: rois are defined as the central 20% of the given image.
Note: Code tested on one or multi image .tif file (from homogeneity and
cv samples)
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.morphology import closing, square
from skimage.draw import polygon_perimeter
from skimage.color import label2rgb
from skimage.measure import label, regionprops
from metroloj import common as cm
# 1. Get roi (default central 20% of the original image) for a given 2d image
def get_roi_default(tiff_final):
"""
Select the default Region Of Interest (ROI) from the initial image,
e.i. select the central 20% of the whole np.array and return it.
The returned arrays, one per image, are enclosed in a list.
Parameters
----------
tiff_data : np.array
3d np.array representing the image data.
the first dimension should represent image index (z,x,y).
Returns
-------
list : list
list of 2 elements:
1. dict enclosing info about the ROI
2. list of ROIs pictures to display
"""
ROI_info = {}
ROI_nb_pixels_list = []
ROI_start_pixel_list = []
ROI_end_pixel_list = []
ROI_Original_ratio_list = []
# we assume that images from same tiff file have the same size
try:
nb_images, xdim, ydim = tiff_final.shape
except ValueError:
xdim, ydim = tiff_final.shape
nb_images = 1
# we want the central 20% of the original image
h, w = int(xdim*0.4), int(ydim*0.4)
# roi defined by top-left (start) and bottom-right (end) pixels
startx, endx = int(xdim//2 - h//2), int(xdim//2 + h//2)
starty, endy = int(ydim//2 - w//2), int(ydim//2 + w//2)
roi_start_pixel = [startx, starty]
roi_end_pixel = [endx, endy]
# initialization of the desired output
xdim_roi, ydim_roi = endx-startx, endy-starty
roi_final = np.zeros((nb_images, xdim_roi, ydim_roi), dtype=int)
if nb_images == 1:
roi_data_temp = tiff_final[startx:endx, starty:endy]
# add roi_temp to the final roi
roi_final = roi_data_temp
# lists for info dataframe
roi_nb_pixels = roi_data_temp.shape
ROI_nb_pixels_list.append(roi_nb_pixels)
ROI_start_pixel_list.append(roi_start_pixel)
ROI_end_pixel_list.append(roi_end_pixel)
ROI_Original_ratio_list.append("20%")
else:
for i in range(nb_images):
roi_data_temp = tiff_final[i][startx:endx, starty:endy]
# add roi_temp to the final roi
roi_final[i] = roi_data_temp
# lists for info dataframe
roi_nb_pixels = roi_data_temp.shape
ROI_nb_pixels_list.append(roi_nb_pixels)
ROI_start_pixel_list.append(roi_start_pixel)
ROI_end_pixel_list.append(roi_end_pixel)
ROI_Original_ratio_list.append("20%")
# dict enclosing info about the ROI
ROI_info["ROI_nb_pixels"] = ROI_nb_pixels_list
ROI_info["ROI_start_pixel"] = ROI_start_pixel_list
ROI_info["ROI_end_pixel"] = ROI_end_pixel_list
ROI_info["ROI_Original_ratio"] = ROI_Original_ratio_list
ROI_info = pd.DataFrame(ROI_info)
return ROI_info, roi_final
# 2. Compute cv
def get_segmented_image(img):
"""
Given a 2D np.array, it replaces all the pixels with an intensity below
a threshold otsu value by 0 as well as artifacts connected to image border.
Parameters
----------
img : np.array
Original image in a 2D format.
Returns
-------
img : np.array
2D np.array where only pixels with significant intensity are given
non null values.
"""
# define threshold
thresh = threshold_otsu(img)
# boolean matrice: True represent the pixels of interest
bw = closing(img > thresh, square(3))
# remove artifacts connected to image border
cleared = clear_border(bw)
# get segmented image
xtot, ytot = np.shape(img)
for i in range(xtot):
for j in range(ytot):
if not cleared[i, j]:
img[i, j] = 0
return img
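# Quick illustration of the segmentation step (not part of the original module):
# only the bright blob away from the image border survives the otsu threshold
# and clear_border steps.
def _segmentation_example():
    toy = np.full((32, 32), 5.0)
    toy[10:20, 10:20] = 150.0
    segmented = get_segmented_image(toy.copy())
    return np.count_nonzero(segmented)   # roughly the 100 bright pixels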
def get_cv_table_global(tiff_data, output_dir=None):
"""
For each np.arrays of the given list, it computes the Coefficient of
Variation (cv) of the central 20% (ROI).
Parameters
----------
tiff_data : np.array
3d np.arrays
output_dir : str, optional
if specified, save the table to the output_dir.
the default is None.
Returns
-------
cv_table : dict
dict enclosing info about the pixels with significant intensities
of the segemented ROI of each given np.array:
1. standard deviation
2. mean
3. number of pixels
4. Coefficient of Variation (cv)
5. Normalized cv: cv relative to min value.
"""
std_intensity_list = []
mean_intensity_list = []
nb_pixels_list = []
cv_list = []
def get_cv_table_global_single(img):
img_segmented = get_segmented_image(img)
ball_intensity_vec_temp = img_segmented[img_segmented != 0]
# Statistics
std_intensity_temp = np.std(ball_intensity_vec_temp)
mean_intensity_temp = np.mean(ball_intensity_vec_temp)
nb_pixels_temp = len(ball_intensity_vec_temp)
cv_temp = std_intensity_temp/mean_intensity_temp
return std_intensity_temp, mean_intensity_temp, nb_pixels_temp, cv_temp
# nb_img: we assume that images from same tiff file have the same size
try:
nb_images, xdim, ydim = tiff_data.shape
    except (AttributeError, ValueError):
xdim, ydim = tiff_data.shape
nb_images = 1
if nb_images == 1:
std_, mean_, nb_pixels_, cv_ = get_cv_table_global_single(tiff_data)
# save
std_intensity_list.append(std_)
mean_intensity_list.append(mean_)
nb_pixels_list.append(nb_pixels_)
cv_list.append(cv_)
else:
for array_ in tiff_data:
std_, mean_, nb_pixels_, cv_ = get_cv_table_global_single(array_)
# save
std_intensity_list.append(std_)
mean_intensity_list.append(mean_)
nb_pixels_list.append(nb_pixels_)
cv_list.append(cv_)
cv_normalized = np.divide(cv_list, min(cv_list))
cv_dict = {"sd": np.around(np.array(std_intensity_list), 2),
"average": np.around(mean_intensity_list, 2),
"nb_pixels": np.around(nb_pixels_list, 2),
"cv": np.around(cv_list, 2),
"cv_relative_to_min": np.around(cv_normalized, 2)
}
if output_dir is not None:
        # save the table as CSV when an output location is given
        pd.DataFrame(cv_dict).to_csv(output_dir)
    return cv_dict
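# Minimal end-to-end sketch (not part of the original module): synthetic data, so
# no microscope .tif file is needed. A bright, noisy square away from the borders
# keeps the segmentation step from discarding everything.
def _cv_example():
    rng = np.random.default_rng(0)
    toy_stack = np.full((2, 64, 64), 10.0)
    toy_stack[:, 28:40, 28:40] = 200.0 + rng.normal(0, 5, size=(2, 12, 12))
    roi_info, roi = get_roi_default(toy_stack)
    return get_cv_table_global(roi)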
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from covsirphy.util.error import SubsetNotFoundError
from covsirphy.cleaning.cbase import CleaningBase
class OxCGRTData(CleaningBase):
"""
Data cleaning of OxCGRT dataset.
Args:
filename (str or None): CSV filename of the dataset
data (pandas.DataFrame or None):
Index
reset index
Columns
- Date: Observation date
- ISO3: ISO 3166-1 alpha-3, like JPN
- Country: country/region name
- variables defined by @variables
citation (str or None): citation or None (empty)
variables (list[str] or None): variables to parse or None (use default variables listed as follows)
- School_closing
- Workplace_closing
- Cancel_events
- Gatherings_restrictions
- Transport_closing
- Stay_home_restrictions
- Internal_movement_restrictions
- International_movement_restrictions
- Information_campaigns
- Testing_policy
- Contact_tracing
- Stringency_index
Note:
Either @filename (high priority) or @data must be specified.
Note:
The default policy indices (Overall etc.) are from README.md and documentation/index_methodology.md in
https://github.com/OxCGRT/covid-policy-tracker/
"""
OXCGRT_VARS = [
"School_closing",
"Workplace_closing",
"Cancel_events",
"Gatherings_restrictions",
"Transport_closing",
"Stay_home_restrictions",
"Internal_movement_restrictions",
"International_movement_restrictions",
"Information_campaigns",
"Testing_policy",
"Contact_tracing",
"Stringency_index"
]
# Indicators except for Stringency index
OXCGRT_VARS_INDICATORS = [v for v in OXCGRT_VARS if v != "Stringency_index"]
def __init__(self, filename=None, data=None, citation=None, variables=None):
self._variables = variables or self.OXCGRT_VARS[:]
super().__init__(filename=filename, data=data, citation=citation, variables=self._variables)
def _cleaning(self):
"""
Perform data cleaning of the raw data.
Returns:
pandas.DataFrame
Index
reset index
Columns
- Date (pandas.Timestamp): Observation date
- ISO3 (str): ISO 3166-1 alpha-3, like JPN
- Country (pandas.Category): country/region name
- Province (pandas.Category): province/prefecture/state name
- variables defined by OxCGRTData(variables)
"""
df = self._raw.copy()
# Prepare data for Greenland
grl_df = df.loc[df[self.COUNTRY] == "Denmark"].copy()
grl_df.loc[:, [self.ISO3, self.COUNTRY]] = ["GRL", "Greenland"]
        df = pd.concat([df, grl_df], sort=True, ignore_index=True)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import folium
survey = pd.read_csv('./Data_Science_Topics_Survey.csv')
survey.columns = ['Date', 'Data Visualization', 'Machine Learning',
'Data Analysis / Statistics', 'Big Data (Spark / Hadoop)', 'Data Journalism', 'Deep Learning']
# print(survey['Data Visualization'].value_counts())
order = ['Very interested', 'Somewhat interested', 'Not interested']
counts = []
for i in range(len(survey.columns) - 1):
    # .iloc replaces the removed .ix accessor; reindex keeps the counts aligned
    # with the fixed label order instead of value_counts' frequency order
    counts.append(survey.iloc[:, i + 1].value_counts().reindex(order).fillna(0).tolist())
transformed_survey = pd.DataFrame(counts, columns=order)
transformed_survey.set_index(survey.columns[1:], inplace=True)
transformed_survey.sort_index(inplace=True)
transformed_survey = transformed_survey[['Not interested', 'Somewhat interested', 'Very interested']]
print(transformed_survey)
transformed_survey = transformed_survey[['Very interested', 'Somewhat interested', 'Not interested']]
transformed_survey.sort_values(
by='Very interested', ascending=False, inplace=True)
transformed_survey['Very interested'] = transformed_survey['Very interested'].div(
len(survey.index)).mul(100).round(2)
transformed_survey['Somewhat interested'] = transformed_survey['Somewhat interested'].div(
len(survey.index)).mul(100).round(2)
transformed_survey['Not interested'] = transformed_survey['Not interested'].div(
len(survey.index)).mul(100).round(2)
print(transformed_survey)
ax = transformed_survey.plot.bar(width=0.8, figsize=(
20, 8), color=('#5cb85c', '#5bc0de', '#d9534f'), fontsize=14)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_yticklabels([])
plt.yticks([])
# easier to read labels
plt.xticks(rotation=0)
plt.title("Percentage of Respondents' Interest in Data Science Areas", fontsize=16)
for p in ax.patches:
b = p.get_bbox()
val = "{:.2f}".format(b.y1 + b.y0)
ax.annotate(val, ((b.x0 + b.x1)/2 - 0.09, b.y1 + 0.5), fontsize=14)
plt.legend(fontsize=14)
plt.tight_layout()
plt.savefig('assignment.png')
crime = | pd.read_csv('./Police_Department_Incidents_-_Previous_Year__2016_.csv') | pandas.read_csv |
# Daishin Securities (대신증권) Cybos Plus API
# Example: building the stock current-price view (10-level quotes / by time / by day)
# This Python example reproduces the 10-level bid/ask, time-by-time and day-by-day data that make up the current-price screen.
# The screen UI uses PyQt; the source and the .ui file can be obtained from the attached files.
import sys
import pandas
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5 import uic
from PyQt5.QtCore import *
import win32com.client
from pandas import Series, DataFrame
import locale
# cp object
g_objCodeMgr = win32com.client.Dispatch("CpUtil.CpCodeMgr")
g_objCpStatus = win32com.client.Dispatch("CpUtil.CpCybos")
g_objCpTrade = win32com.client.Dispatch("CpTrade.CpTdUtil")
locale.setlocale(locale.LC_ALL, '')
# Structure that stores current-price information
class stockPricedData:
def __init__(self):
self.dicEx = {ord('0'): "동시호가/장중 아님", ord('1'): "동시호가", ord('2'): "장중"}
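        # Market-status flag labels: '0' = outside call auction / regular session,
        # '1' = call auction (expected execution price), '2' = regular trading session.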
self.code = ""
self.name = ""
        self.cur = 0  # current price
        self.diff = 0  # change vs. previous close
        self.diffp = 0  # percent change
        self.offer = [0 for _ in range(10)]  # ask prices (10 levels)
        self.bid = [0 for _ in range(10)]  # bid prices (10 levels)
        self.offervol = [0 for _ in range(10)]  # ask volumes per level
        self.bidvol = [0 for _ in range(10)]  # bid volumes per level
        self.totOffer = 0  # total ask volume
        self.totBid = 0  # total bid volume
        self.vol = 0  # traded volume
        self.tvol = 0  # volume of the latest tick
        self.baseprice = 0  # base (reference) price
        self.high = 0
        self.low = 0
        self.open = 0
        self.volFlag = ord('0')  # whether the last trade hit the bid or the ask
        self.time = 0
        self.sum_buyvol = 0
        self.sum_sellvol = 0
        self.vol_str = 0
        # expected (call-auction) execution price information
        self.exFlag = ord('2')
        self.expcur = 0  # expected execution price
        self.expdiff = 0  # expected change
        self.expdiffp = 0  # expected percent change
        self.expvol = 0  # expected volume
self.objCur = CpPBStockCur()
self.objOfferbid = CpPBStockBid()
def __del__(self):
self.objCur.Unsubscribe()
self.objOfferbid.Unsubscribe()
    # compute the change relative to the previous day's close
def makediffp(self):
lastday = 0
        if (self.exFlag == ord('1')):  # call-auction period (expected execution price)
if self.baseprice > 0:
lastday = self.baseprice
else:
lastday = self.expcur - self.expdiff
if lastday:
self.expdiffp = (self.expdiff / lastday) * 100
else:
self.expdiffp = 0
else:
if self.baseprice > 0:
lastday = self.baseprice
else:
lastday = self.cur - self.diff
if lastday:
self.diffp = (self.diff / lastday) * 100
else:
self.diffp = 0
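    # Note on makediffp() above: diffp/expdiffp is the percent change relative to the
    # previous close - the base price when it is set, otherwise reconstructed as
    # (current price - change).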
def getCurColor(self):
diff = self.diff
        if (self.exFlag == ord('1')):  # call-auction period (expected execution price)
diff = self.expdiff
if (diff > 0):
return 'color: red'
elif (diff == 0):
return 'color: black'
elif (diff < 0):
return 'color: blue'
# CpEvent: class that receives real-time events
class CpEvent:
def set_params(self, client, name, rpMst, parent):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to distinguish events from different services
        self.parent = parent  # kept for the callback
        self.rpMst = rpMst
    # event handler that actually receives quotes from CYBOS PLUS
def OnReceived(self):
if self.name == "stockcur":
            # real-time update of current-price tick (execution) data
            self.rpMst.exFlag = self.client.GetHeaderValue(19)  # expected-execution flag
            code = self.client.GetHeaderValue(0)
            diff = self.client.GetHeaderValue(2)
            cur = self.client.GetHeaderValue(13)  # current price
            vol = self.client.GetHeaderValue(9)  # traded volume
            # this example only handles the regular session.
            if (self.rpMst.exFlag == ord('1')):  # call-auction period (expected execution)
                # expected execution price information
self.rpMst.expcur = cur
self.rpMst.expdiff = diff
self.rpMst.expvol = vol
else:
self.rpMst.cur = cur
self.rpMst.diff = diff
self.rpMst.makediffp()
self.rpMst.vol = vol
self.rpMst.open = self.client.GetHeaderValue(4)
self.rpMst.high = self.client.GetHeaderValue(5)
self.rpMst.low = self.client.GetHeaderValue(6)
self.rpMst.tvol = self.client.GetHeaderValue(17)
                self.rpMst.volFlag = self.client.GetHeaderValue(14)  # '1' buy, '2' sell
                self.rpMst.time = self.client.GetHeaderValue(18)
                self.rpMst.sum_buyvol = self.client.GetHeaderValue(16)  # cumulative buy volume (by execution price)
                self.rpMst.sum_sellvol = self.client.GetHeaderValue(15)  # cumulative sell volume (by execution price)
if (self.rpMst.sum_sellvol):
self.rpMst.volstr = self.rpMst.sum_buyvol / self.rpMst.sum_sellvol * 100
else:
self.rpMst.volstr = 0
self.rpMst.makediffp()
            # current price updated - notify the caller
self.parent.monitorPriceChange()
return
elif self.name == "stockbid":
            # real-time update of the 10-level bid/ask data
code = self.client.GetHeaderValue(0)
dataindex = [3, 7, 11, 15, 19, 27, 31, 35, 39, 43]
obi = 0
for i in range(10):
self.rpMst.offer[i] = self.client.GetHeaderValue(dataindex[i])
self.rpMst.bid[i] = self.client.GetHeaderValue(dataindex[i] + 1)
self.rpMst.offervol[i] = self.client.GetHeaderValue(dataindex[i] + 2)
self.rpMst.bidvol[i] = self.client.GetHeaderValue(dataindex[i] + 3)
self.rpMst.totOffer = self.client.GetHeaderValue(23)
self.rpMst.totBid = self.client.GetHeaderValue(24)
            # invoke the callback for the 10-level quote change
self.parent.monitorOfferbidChange()
return
# Root class for SB/PB (subscribe/publish) requests
class CpPublish:
def __init__(self, name, serviceID):
self.name = name
self.obj = win32com.client.Dispatch(serviceID)
self.bIsSB = False
def Subscribe(self, var, rpMst, parent):
if self.bIsSB:
self.Unsubscribe()
if (len(var) > 0):
self.obj.SetInputValue(0, var)
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, rpMst, parent)
self.obj.Subscribe()
self.bIsSB = True
def Unsubscribe(self):
if self.bIsSB:
self.obj.Unsubscribe()
self.bIsSB = False
# CpPBStockCur: real-time current-price subscription class
class CpPBStockCur(CpPublish):
def __init__(self):
super().__init__("stockcur", "DsCbo1.StockCur")
# CpPBStockBid: real-time 10-level quote subscription class
class CpPBStockBid(CpPublish):
def __init__(self):
super().__init__("stockbid", "Dscbo1.StockJpBid")
# CpPBConnection: subscribes to connection-status events
class CpPBConnection:
def __init__(self):
self.obj = win32com.client.Dispatch("CpUtil.CpCybos")
handler = win32com.client.WithEvents(self.obj, CpEvent)
        handler.set_params(self.obj, "connection", None, None)  # rpMst/parent are unused for connection events
# CpRPCurrentPrice: queries basic current-price information
class CpRPCurrentPrice:
def __init__(self):
if (g_objCpStatus.IsConnect == 0):
print("PLUS가 정상적으로 연결되지 않음. ")
return
self.objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
return
def Request(self, code, rtMst, callbackobj):
        # request the current-price data
rtMst.objCur.Unsubscribe()
rtMst.objOfferbid.Unsubscribe()
self.objStockMst.SetInputValue(0, code)
ret = self.objStockMst.BlockRequest()
if self.objStockMst.GetDibStatus() != 0:
print("통신상태", self.objStockMst.GetDibStatus(), self.objStockMst.GetDibMsg1())
return False
        # store the received current-price information in rtMst
        rtMst.code = code
        rtMst.name = g_objCodeMgr.CodeToName(code)
        rtMst.cur = self.objStockMst.GetHeaderValue(11)  # current (closing) price
        rtMst.diff = self.objStockMst.GetHeaderValue(12)  # change vs. previous day
        rtMst.baseprice = self.objStockMst.GetHeaderValue(27)  # base price
        rtMst.vol = self.objStockMst.GetHeaderValue(18)  # traded volume
        rtMst.exFlag = self.objStockMst.GetHeaderValue(58)  # expected-price flag
        rtMst.expcur = self.objStockMst.GetHeaderValue(55)  # expected execution price
        rtMst.expdiff = self.objStockMst.GetHeaderValue(56)  # expected change
        rtMst.makediffp()
        rtMst.totOffer = self.objStockMst.GetHeaderValue(71)  # total ask volume
        rtMst.totBid = self.objStockMst.GetHeaderValue(73)  # total bid volume
        # 10-level quotes
        for i in range(10):
            rtMst.offer[i] = (self.objStockMst.GetDataValue(0, i))  # ask price
            rtMst.bid[i] = (self.objStockMst.GetDataValue(1, i))  # bid price
            rtMst.offervol[i] = (self.objStockMst.GetDataValue(2, i))  # ask volume
            rtMst.bidvol[i] = (self.objStockMst.GetDataValue(3, i))  # bid volume
rtMst.objCur.Subscribe(code, rtMst, callbackobj)
rtMst.objOfferbid.Subscribe(code, rtMst, callbackobj)
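# --------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original example). It assumes a
# logged-in CYBOS Plus session and a caller object that implements the
# monitorPriceChange() / monitorOfferbidChange() callbacks provided by the
# PyQt window further below; the stock code 'A005930' is only an illustration.
#
#   mst = stockPricedData()
#   objPrice = CpRPCurrentPrice()
#   objPrice.Request('A005930', mst, caller_window)
#   print(mst.name, mst.cur, mst.diff)   # snapshot; real-time updates follow
# --------------------------------------------------------------------------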
# CpWeekList: retrieves the day-by-day (daily) list
class CpWeekList:
def __init__(self):
self.objWeek = win32com.client.Dispatch("Dscbo1.StockWeek")
return
def Request(self, code, caller):
        # request the daily data
        self.objWeek.SetInputValue(0, code)
        # output containers
dates = []
opens = []
highs = []
lows = []
closes = []
diffs = []
vols = []
diffps = []
foreign_vols = []
foreign_diff = []
foreign_p = []
        # running count - only fetch up to about 100 rows
sumCnt = 0
while True:
ret = self.objWeek.BlockRequest()
if self.objWeek.GetDibStatus() != 0:
print("통신상태", self.objWeek.GetDibStatus(), self.objWeek.GetDibMsg1())
return False
cnt = self.objWeek.GetHeaderValue(1)
sumCnt += cnt
if cnt == 0:
break
for i in range(cnt):
dates.append(self.objWeek.GetDataValue(0, i))
opens.append(self.objWeek.GetDataValue(1, i))
highs.append(self.objWeek.GetDataValue(2, i))
lows.append(self.objWeek.GetDataValue(3, i))
closes.append(self.objWeek.GetDataValue(4, i))
temp = self.objWeek.GetDataValue(5, i)
diffs.append(temp)
vols.append(self.objWeek.GetDataValue(6, i))
temp2 = self.objWeek.GetDataValue(10, i)
if (temp < 0):
temp2 *= -1
diffps.append(temp2)
                foreign_vols.append(self.objWeek.GetDataValue(7, i))  # foreign ownership
                foreign_diff.append(self.objWeek.GetDataValue(8, i))  # foreign ownership change vs. previous day
                foreign_p.append(self.objWeek.GetDataValue(9, i))  # foreign ownership ratio
if (sumCnt > 100):
break
if self.objWeek.Continue == False:
break
if len(dates) == 0:
return False
caller.rpWeek = None
weekCol = {'close': closes,
'diff': diffs,
'diffp': diffps,
'vol': vols,
'open': opens,
'high': highs,
'low': lows,
'for_v': foreign_vols,
'for_d': foreign_diff,
'for_p': foreign_p,
}
caller.rpWeek = DataFrame(weekCol, index=dates)
return True
# CpStockBid: time-by-time (intraday tick) query
class CpStockBid:
def __init__(self):
self.objSBid = win32com.client.Dispatch("Dscbo1.StockBid")
return
def Request(self, code, caller):
        # request the intraday data
        self.objSBid.SetInputValue(0, code)
        self.objSBid.SetInputValue(2, 80)  # number of rows requested (max 80)
        self.objSBid.SetInputValue(3, ord('C'))  # 'C' compare by execution price, 'H' compare by quote
times = []
curs = []
diffs = []
tvols = []
offers = []
bids = []
vols = []
        offerbidFlags = []  # execution status: '1' buy, '2' sell
        volstrs = []  # volume strength
        marketFlags = []  # session type: '1' call auction (expected execution), '2' regular session
        # running count - only fetch up to about 100 rows
sumCnt = 0
while True:
ret = self.objSBid.BlockRequest()
if self.objSBid.GetDibStatus() != 0:
print("통신상태", self.objSBid.GetDibStatus(), self.objSBid.GetDibMsg1())
return False
cnt = self.objSBid.GetHeaderValue(2)
sumCnt += cnt
if cnt == 0:
break
strcur = ""
strflag = ""
strflag2 = ""
for i in range(cnt):
cur = self.objSBid.GetDataValue(4, i)
times.append(self.objSBid.GetDataValue(9, i))
diffs.append(self.objSBid.GetDataValue(1, i))
vols.append(self.objSBid.GetDataValue(5, i))
tvols.append(self.objSBid.GetDataValue(6, i))
offers.append(self.objSBid.GetDataValue(2, i))
bids.append(self.objSBid.GetDataValue(3, i))
flag = self.objSBid.GetDataValue(7, i)
if (flag == ord('1')):
strflag = "체결매수"
else:
strflag = "체결매도"
offerbidFlags.append(strflag)
volstrs.append(self.objSBid.GetDataValue(8, i))
flag = self.objSBid.GetDataValue(10, i)
if (flag == ord('1')):
strflag2 = "예상체결"
# strcur = '*' + str(cur)
else:
strflag2 = "장중"
# strcur = str(cur)
marketFlags.append(strflag2)
curs.append(cur)
if (sumCnt > 100):
break
if self.objSBid.Continue == False:
break
if len(times) == 0:
return False
caller.rpStockBid = None
sBidCol = {'time': times,
'cur': curs,
'diff': diffs,
'vol': vols,
'tvol': tvols,
'offer': offers,
'bid': bids,
'flag': offerbidFlags,
'market': marketFlags,
'volstr': volstrs}
caller.rpStockBid = | DataFrame(sBidCol) | pandas.DataFrame |
import datetime as dt
import pandas as pd
import numpy as np
from pandas.plotting import table
import matplotlib.pyplot as plt
# Import the Lipper Hedge Fund Data
Lip = pd.read_csv(r'/Users/rachnish/Dropbox/TWSA Session #1 - Wed Nov 20/Kapil_Data.csv', index_col='Date')
# format the date columns to datetime format
Lip['Performance Start Date'] = pd.to_datetime(Lip['Performance Start Date'], errors='raise', dayfirst=True)
Lip['Performance End Date'] = | pd.to_datetime(Lip['Performance End Date'], errors='raise', dayfirst=True) | pandas.to_datetime |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = | DataFrame(['00:00:01']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error,mean_absolute_error
from statsmodels.tsa.api import ExponentialSmoothing
from . helpers import create_train_test,seasonal_options
import pickle
from . BBDD import new_model, get_best_model
from struct import *
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(len(seq[int(last):int(last + avg)]))
last += avg
return out
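# For example, chunkIt(range(10), 3) returns [3, 3, 4]: the lengths of three
# roughly equal slices covering the sequence. It is used below to split the
# test window into parts that are re-forecast incrementally.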
def anomaly_holt(lista_datos,num_fut,desv_mse=0,name='NA'):
lista_puntos = np.arange(0, len(lista_datos),1)
df, df_train, df_test = create_train_test(lista_puntos, lista_datos)
engine_output={}
####################ENGINE START
stepwise_model = ExponentialSmoothing(df_train['valores'],seasonal_periods=1 )
fit_stepwise_model = stepwise_model.fit()
fit_forecast_pred_full = fit_stepwise_model.fittedvalues
future_forecast_pred = fit_stepwise_model.forecast(len(df_test['valores']))
###### sliding windows
#ventanas=h.windows(lista_datos,10)
#print(ventanas[0])
#training_data=[]
#count=0
#forecast_pred10 =[]
#real_pred10=[]
#for slot in ventanas:
#if count != 0:
#stepwise_model = ExponentialSmoothing(training_data,seasonal_periods=1 )
#fit_stepwise_model = stepwise_model.fit()
#future_forecast_pred = fit_stepwise_model.forecast(len(slot))
#forecast_pred10.extend(future_forecast_pred)
#real_pred10.extend(slot)
#training_data.extend(slot)
#else:
#training_data.extend(slot)
#forecast_pred10.extend(slot)
#real_pred10.extend(slot)
#count=1
#print ('Wndows prediction')
##print ( forecast_pred10)
##print ( real_pred10)
#print ('Wndows mae ' + str(mean_absolute_error(forecast_pred10, real_pred10)))
##########GRID to find seasonal n_periods
mae_period = 99999999
best_period=0
best_trend='null'
#list_trend=['add','mul','additive','multiplicative']
list_trend=['add','mul', 'additive', 'multiplicative'] #,'None']
print ("pasa hasta aqui")
periods = seasonal_options(df.valores)
print (periods)
#for trend_val in list_trend:
for seasonal_val in list_trend:
for period in periods:
            print('Period', period)
list_forecast_camb = []
tam_train = int(len(df)*0.7)
df_test = df[tam_train:]
            part_lengths = chunkIt(range(len(df_test)), 3)
            for i in part_lengths:
                print('Predicting chunk of length', i)
df_train_camb = df[:tam_train+i]
stepwise_model_camb = ExponentialSmoothing(df_train_camb['valores'] , seasonal=seasonal_val ,seasonal_periods=period ).fit()
forecast_camb = stepwise_model_camb.forecast(i)
list_forecast_camb.extend(forecast_camb.values[:i])
mae_temp = mean_absolute_error(list_forecast_camb,df_test['valores'].values)
if mae_temp < mae_period:
best_period = period
# best_trend = trend_val
best_seasonal = seasonal_val
print ('best_period',best_period)
# print ('best_trend', best_trend)
print ('mae_temp', mae_temp)
print ('best_seasonal', best_seasonal)
mae_period = mae_temp
            else:
                pass  # this combination did not improve the MAE
print ("######best mae is " + str(mae_period) + " with the period " + str(best_period)+ " trend " + best_trend)
stepwise_model = ExponentialSmoothing(df_train['valores'],seasonal_periods=best_period , seasonal=best_seasonal )
fit_stepwise_model = stepwise_model.fit()
future_forecast_pred = fit_stepwise_model.forecast(len(df_test['valores']))
print (future_forecast_pred.values)
list_test = df_test['valores'].values
mse_test = (future_forecast_pred - list_test)
test_values = pd.DataFrame(future_forecast_pred,index = df_test.index,columns=['expected value'])
print(list_test)
mse = mean_squared_error(future_forecast_pred.values,list_test)
print('Model_test mean error: {}'.format(mse))
rmse = np.sqrt(mse)
print('Model_test root error: {}'.format(rmse))
mse_abs_test = abs(mse_test)
df_aler = pd.DataFrame(future_forecast_pred,index = df.index,columns=['expected value'])
df_aler['step'] = df['puntos']
df_aler['real_value'] = df_test['valores']
df_aler['mse'] = mse
df_aler['rmse'] = rmse
df_aler['mae'] = mean_absolute_error(list_test, future_forecast_pred)
df_aler['anomaly_score'] = abs(df_aler['expected value'] - df_aler['real_value']) / df_aler['mae']
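    # anomaly_score is the absolute forecast error normalised by the MAE;
    # points scoring above 2 (error larger than twice the MAE) are flagged below.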
df_aler_ult = df_aler[:5]
df_aler_ult = df_aler_ult[(df_aler_ult.index==df_aler.index.max())|(df_aler_ult.index==((df_aler.index.max())-1))
|(df_aler_ult.index==((df_aler.index.max())-2))|(df_aler_ult.index==((df_aler.index.max())-3))
|(df_aler_ult.index==((df_aler.index.max())-4))]
if len(df_aler_ult) == 0:
exists_anom_last_5 = 'FALSE'
else:
exists_anom_last_5 = 'TRUE'
df_aler = df_aler[(df_aler['anomaly_score']> 2)]
max = df_aler['anomaly_score'].max()
min = df_aler['anomaly_score'].min()
df_aler['anomaly_score']= ( df_aler['anomaly_score'] - min ) /(max - min)
max = df_aler_ult['anomaly_score'].max()
min = df_aler_ult['anomaly_score'].min()
df_aler_ult['anomaly_score']= ( df_aler_ult['anomaly_score'] - min ) /(max - min)
print ("Anomaly finished. Start forecasting")
stepwise_model1 = ExponentialSmoothing(df['valores'],seasonal_periods=best_period,seasonal=best_seasonal)
print ("Pass the training")
fit_stepwise_model1 = stepwise_model1.fit()
#with open('./models_temp/learned_model.pkl','w') as f:
# pickle.dump(results,f)
filename='./models_temp/learned_model_holt_winters'+name
with open(filename,'w') as f:
f.write(str(best_period)+','+str(best_trend))
f.close()
new_model(name, 'Holtwinters', pack('N', 365),str(best_period)+','+str(best_trend),mae_period)
future_forecast_pred1 = fit_stepwise_model1.forecast(num_fut)
print ("Pass the forecast")
    engine_output['rmse'] = rmse
    engine_output['mse'] = mse
    engine_output['mae'] = mean_absolute_error(list_test, future_forecast_pred)
    engine_output['present_status'] = exists_anom_last_5
    engine_output['present_alerts'] = df_aler_ult.fillna(0).to_dict(orient='records')
    engine_output['past'] = df_aler.fillna(0).to_dict(orient='records')
    engine_output['engine'] = 'Holtwinters'
    print("Only for future")
    df_future = pd.DataFrame(future_forecast_pred1, columns=['value'])
    df_future['value'] = df_future.value.astype("float32")
    df_future['step'] = np.arange(len(lista_datos), len(lista_datos) + num_fut, 1)
    engine_output['future'] = df_future.to_dict(orient='records')
    test_values['step'] = test_values.index
    print("Holt-Winters debug values")
    print(test_values)
    engine_output['debug'] = test_values.to_dict(orient='records')
    print("the forecast is")
    print(df_future)
return engine_output
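# Hypothetical usage sketch (assumes the helpers/BBDD modules and the
# ./models_temp directory used above are available; names are illustrative):
#
#   output = anomaly_holt(list_of_values, num_fut=10, name='cpu_load')
#   output['future']          # forecast for the next num_fut steps
#   output['present_alerts']  # anomalies detected among the last five points
#   output['mae'], output['rmse'], output['mse']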
def forecast_holt(lista_datos,num_fut,desv_mse=0,name='NA'):
lista_puntos = np.arange(0, len(lista_datos),1)
df, df_train, df_test = create_train_test(lista_puntos, lista_datos)
engine_output={}
best_period=0
#stepwise_model = ExponentialSmoothing(df_train['valores'],seasonal_periods=best_period ,trend='add', seasonal='add', )
#fit_stepwise_model = stepwise_model.fit()
filename='./models_temp/learned_model_holt_winters'+name
with open(filename,'r') as f:
best_period, best_trend= f.read().split(",")
best_period=int(best_period)
best_trend=best_trend
f.close()
(model_name,model,params)=get_best_model(name)
print("parametros" + params)
best_period, best_trend=params.split(",")
best_period=int(best_period)
best_trend=best_trend
print("el dato es ")
print (str(best_period))
stepwise_model = ExponentialSmoothing(df_train['valores'],seasonal_periods=best_period ,trend='add', seasonal='add', )
fit_stepwise_model = stepwise_model.fit()
future_forecast_pred = fit_stepwise_model.forecast(len(df_test['valores']))
print (future_forecast_pred.values)
list_test = df_test['valores'].values
mse_test = (future_forecast_pred - list_test)
test_values = pd.DataFrame(future_forecast_pred,index = df_test.index,columns=['expected value'])
print(list_test)
mse = mean_squared_error(future_forecast_pred.values,list_test)
print('Model_test mean error: {}'.format(mse))
rmse = np.sqrt(mse)
print('Model_test root error: {}'.format(rmse))
mse_abs_test = abs(mse_test)
df_aler = pd.DataFrame(future_forecast_pred,index = df.index,columns=['expected value'])
df_aler['step'] = df['puntos']
df_aler['real_value'] = df_test['valores']
df_aler['mse'] = mse
df_aler['rmse'] = rmse
df_aler['mae'] = mean_absolute_error(list_test, future_forecast_pred)
df_aler['anomaly_score'] = abs(df_aler['expected value'] - df_aler['real_value']) / df_aler['mae']
df_aler_ult = df_aler[:5]
df_aler_ult = df_aler_ult[(df_aler_ult.index==df_aler.index.max())|(df_aler_ult.index==((df_aler.index.max())-1))
|(df_aler_ult.index==((df_aler.index.max())-2))|(df_aler_ult.index==((df_aler.index.max())-3))
|(df_aler_ult.index==((df_aler.index.max())-4))]
if len(df_aler_ult) == 0:
exists_anom_last_5 = 'FALSE'
else:
exists_anom_last_5 = 'TRUE'
df_aler = df_aler[(df_aler['anomaly_score']> 2)]
max = df_aler['anomaly_score'].max()
min = df_aler['anomaly_score'].min()
df_aler['anomaly_score']= ( df_aler['anomaly_score'] - min ) /(max - min)
max = df_aler_ult['anomaly_score'].max()
min = df_aler_ult['anomaly_score'].min()
df_aler_ult['anomaly_score']= ( df_aler_ult['anomaly_score'] - min ) /(max - min)
print ("Anomaly finished. Start forecasting")
stepwise_model1 = ExponentialSmoothing(df['valores'],seasonal_periods=best_period , seasonal='add')
print ("Pass the training")
fit_stepwise_model1 = stepwise_model1.fit()
future_forecast_pred1 = fit_stepwise_model1.forecast(num_fut)
print ("Pass the forecast")
engine_output['rmse'] = rmse
engine_output['mse'] = mse
engine_output['mae'] = mean_absolute_error(list_test, future_forecast_pred)
engine_output['present_status']=exists_anom_last_5
    engine_output['present_alerts'] = df_aler_ult.fillna(0).to_dict(orient='records')
    engine_output['past'] = df_aler.fillna(0).to_dict(orient='records')
engine_output['engine']='Holtwinters'
print ("Only for future")
df_future= | pd.DataFrame(future_forecast_pred1,columns=['value']) | pandas.DataFrame |
from datetime import datetime
import pandas as pd
import pytest
from dask import dataframe as dd
import featuretools as ft
from featuretools import Relationship
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import import_or_none
ks = import_or_none('databricks.koalas')
@pytest.fixture
def values_es(es):
es.normalize_entity('log', 'values', 'value',
make_time_index=True,
new_entity_time_index="value_time")
return es
@pytest.fixture
def true_values_lti():
true_values_lti = pd.Series([datetime(2011, 4, 10, 10, 41, 0),
datetime(2011, 4, 9, 10, 31, 9),
datetime(2011, 4, 9, 10, 31, 18),
datetime(2011, 4, 9, 10, 31, 27),
datetime(2011, 4, 10, 10, 40, 1),
datetime(2011, 4, 10, 10, 41, 3),
datetime(2011, 4, 9, 10, 30, 12),
datetime(2011, 4, 10, 10, 41, 6),
datetime(2011, 4, 9, 10, 30, 18),
datetime(2011, 4, 9, 10, 30, 24),
datetime(2011, 4, 10, 11, 10, 3)])
return true_values_lti
@pytest.fixture
def true_sessions_lti():
sessions_lti = pd.Series([datetime(2011, 4, 9, 10, 30, 24),
datetime(2011, 4, 9, 10, 31, 27),
datetime(2011, 4, 9, 10, 40, 0),
datetime(2011, 4, 10, 10, 40, 1),
datetime(2011, 4, 10, 10, 41, 6),
datetime(2011, 4, 10, 11, 10, 3)])
return sessions_lti
@pytest.fixture
def wishlist_df():
wishlist_df = pd.DataFrame({
"session_id": [0, 1, 2, 2, 3, 4, 5],
"datetime": [datetime(2011, 4, 9, 10, 30, 15),
datetime(2011, 4, 9, 10, 31, 30),
datetime(2011, 4, 9, 10, 30, 30),
datetime(2011, 4, 9, 10, 35, 30),
datetime(2011, 4, 10, 10, 41, 0),
datetime(2011, 4, 10, 10, 39, 59),
datetime(2011, 4, 10, 11, 10, 2)],
"product_id": ['coke zero', 'taco clock', 'coke zero', 'car',
'toothpaste', 'brown bag', 'coke zero'],
})
return wishlist_df
@pytest.fixture
def extra_session_df(es):
row_values = {'customer_id': 2,
'device_name': 'PC',
'device_type': 0,
'id': 6}
row = pd.DataFrame(row_values, index=pd.Index([6], name='id'))
df = to_pandas(es['sessions'].df)
df = df.append(row, sort=True).sort_index()
if isinstance(es['sessions'].df, dd.DataFrame):
df = dd.from_pandas(df, npartitions=3)
if ks and isinstance(es['sessions'].df, ks.DataFrame):
df = ks.from_pandas(df)
return df
class TestLastTimeIndex(object):
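# add_last_time_indexes computes, for each instance of an entity, the latest timestamp at which that
# instance (or any of its child instances) appears; the tests below check that value for leaf and
# parent entities, with and without a time index.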
def test_leaf(self, es):
es.add_last_time_indexes()
log = es['log']
assert len(log.last_time_index) == 17
log_df = to_pandas(log.df)
log_lti = to_pandas(log.last_time_index)
for v1, v2 in zip(log_lti, log_df['datetime']):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_leaf_no_time_index(self, es):
es.add_last_time_indexes()
stores = es['stores']
true_lti = pd.Series([None for x in range(6)], dtype='datetime64[ns]')
assert len(true_lti) == len(stores.last_time_index)
stores_lti = to_pandas(stores.last_time_index)
for v1, v2 in zip(stores_lti, true_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
# TODO: possible issue with either normalize_entity or add_last_time_indexes
def test_parent(self, values_es, true_values_lti):
# test entity with time index and all instances in child entity
if not all(isinstance(entity.df, pd.DataFrame) for entity in values_es.entities):
pytest.xfail('possible issue with either normalize_entity or add_last_time_indexes')
values_es.add_last_time_indexes()
values = values_es['values']
assert len(values.last_time_index) == 11
sorted_lti = to_pandas(values.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_values_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
# TODO: fails with Dask, tests needs to be reworked
def test_parent_some_missing(self, values_es, true_values_lti):
# test entity with time index and not all instances have children
if not all(isinstance(entity.df, pd.DataFrame) for entity in values_es.entities):
pytest.xfail('fails with Dask, tests needs to be reworked')
values = values_es['values']
# add extra value instance with no children
row_values = {'value': 21.0,
'value_time': pd.Timestamp("2011-04-10 11:10:02"),
'values_id': 11}
# make sure index doesn't have same name as column to suppress pandas warning
row = pd.DataFrame(row_values, index=pd.Index([11]))
df = values.df.append(row, sort=True)
df = df[['value', 'value_time']].sort_values(by='value')
df.index.name = 'values_id'
values.update_data(df)
values_es.add_last_time_indexes()
# lti value should default to instance's time index
true_values_lti[10] = pd.Timestamp("2011-04-10 11:10:02")
true_values_lti[11] = pd.Timestamp("2011-04-10 11:10:03")
assert len(values.last_time_index) == 12
sorted_lti = values.last_time_index.sort_index()
for v1, v2 in zip(sorted_lti, true_values_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_parent_no_time_index(self, es, true_sessions_lti):
# test entity without time index and all instances have children
es.add_last_time_indexes()
sessions = es['sessions']
assert len(sessions.last_time_index) == 6
sorted_lti = to_pandas(sessions.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_sessions_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_parent_no_time_index_missing(self, es, extra_session_df,
true_sessions_lti):
# test entity without time index and not all instance have children
sessions = es['sessions']
# add session instance with no associated log instances
sessions.update_data(extra_session_df)
es.add_last_time_indexes()
# since sessions has no time index, default value is NaT
true_sessions_lti[6] = pd.NaT
assert len(sessions.last_time_index) == 7
sorted_lti = to_pandas(sessions.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_sessions_lti):
assert ( | pd.isnull(v1) | pandas.isnull |
#!/usr/bin/env python3
# compare event timing from adult raters with event timing from children raters
import glob
import numpy as np
import pandas as pd
from settings import *
def get_boundaries(df,agedf):
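# Keep only raters who logged more than one 'Desc_Me' (event boundary) response.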
PartIDs = np.array(df.loc[df['Screen Name'] == 'Desc_Me']['Participant Public ID'].value_counts()[df.loc[df['Screen Name'] == 'Desc_Me']['Participant Public ID'].value_counts()>1].index)
df=df[df['Participant Public ID'].isin(PartIDs)]
agedf = agedf[agedf['Participant Public ID'].isin(PartIDs)]
boundaries = pd.to_numeric(df.loc[df['Screen Name'] == 'Desc_Me']['Reaction Time']).values
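# Convert boundary reaction times (in ms) to TR indices.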
spike_boundaries = np.round(boundaries/1000/TR,0).astype(int)
counts = np.append(np.bincount(spike_boundaries)[:-2],np.bincount(spike_boundaries)[-1])
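# Convolve the per-TR boundary counts with the HRF to build a BOLD-like event-boundary timecourse.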
ev_conv = np.convolve(counts,hrf)[:nTR]
# Subject ages:
Ages = []
for sub in PartIDs:
subdf = agedf[agedf['Participant Public ID'].isin([sub])]
Ages.append(pd.to_numeric(subdf[subdf['Question Key']=='age-year']['Response'].values)[0] + pd.to_numeric(subdf[subdf['Question Key']=='age-month']['Response'].values)[0] / 12)
return spike_boundaries,ev_conv,Ages,df,agedf
def xcorr(a,b):
# This helped convince me I'm doing the right thing:
# https://currents.soest.hawaii.edu/ocn_data_analysis/_static/SEM_EDOF.html
a = (a - np.mean(a)) / (np.std(a))
b = (b - np.mean(b)) / (np.std(b))
c = np.correlate(a, b, 'full')/max(len(a),len(b))
return c
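# Illustrative use (variable names are placeholders): for two equal-length timecourses a and b,
# the peak of xcorr(a, b) sits at index len(a) - 1 when they are perfectly aligned, so the
# displacement of np.argmax(xcorr(a, b)) from that centre gives their relative lag in TRs.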
segpath = codedr + 'HBN_fmriprep_code/video_segmentation/'
ev_figpath = figurepath+'event_annotations/'
nTR = 750
TR = 0.8
# HRF (from AFNI)
dt = np.arange(0, 15,TR)
p = 8.6
q = 0.547
hrf = np.power(dt / (p * q), p) * np.exp(p - dt / q)
eventdict = {key:{} for key in ['timing','annotation']}
for csv in glob.glob(segpath+'*csv'):
initials = csv.split('/')[-1].split('-')[0]
df = pd.read_csv(csv)
if not any('TR' in c for c in df.columns):
df.columns = df.iloc[0]
df = df.iloc[1:]
df = df.loc[:, df.columns.notnull()]
TRstr = [t for t in df.columns if 'TR' in t][0]
if TRstr != 'TR':
df = df[(df['Scene Title '].notna()) & (df['Start TR'].notna())]
df = df.rename(columns={'Scene Title ': 'Segment details'})
eventdict['timing'][initials] = [int(tr) for tr in list(df[TRstr]) if not | pd.isnull(tr) | pandas.isnull |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
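# Illustrative example: Index([1, 2, 3]).where([False, True, True]) replaces the masked
# position with NaN, so the result is promoted to Float64Index([nan, 2.0, 3.0]).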
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
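Examples
--------
A sketch showing that the index keys become both the index and the
values of the resulting Series (assuming a plain object-dtype Index):
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> s = idx.to_series()
>>> list(s.index) == list(s.values)
True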
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a TypeError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
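Examples
--------
Illustrative cast of an integer Index to floating point (exact repr
may vary by version and Index subclass):
>>> idx = pd.Index([1, 2, 3])
>>> idx.astype('float64')
Float64Index([1.0, 2.0, 3.0], dtype='float64')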
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
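Examples
--------
Illustrative usage on a simple named Index:
>>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')
>>> idx.rename('grade')
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')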
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
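Examples
--------
Illustrative usage with two small integer indexes (reprs may vary by
version):
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([4, 5])
>>> idx1.append(idx2)
Int64Index([1, 2, 3, 4, 5], dtype='int64')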
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : bool, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If the Index doesn't hold NA, a ValueError is raised
See also
--------
numpy.ndarray.take
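Examples
--------
Illustrative usage selecting positions from a plain Index (subclasses
return their own type):
>>> idx = pd.Index(['a', 'b', 'c', 'd'])
>>> idx.take([2, 0, 1])
Index(['c', 'a', 'b'], dtype='object')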
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach this branch; callers check hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else get mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA
See Also
--------
pandas.Index.notna : boolean inverse of isna.
pandas.Index.dropna : omit entries with missing values.
pandas.isna : top-level isna.
Series.isna : detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.isna: inverse of notna
pandas.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
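Examples
--------
Illustrative usage: positions where the mask is True are replaced by
``value`` (integer Index assumed; reprs may vary by version):
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.putmask(idx > 2, 0)
Int64Index([1, 2, 0, 0], dtype='int64')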
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
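Examples
--------
Element order matters; the ``name`` attribute is ignored (use
``identical`` to compare attributes as well):
>>> pd.Index(['a', 'b']).equals(pd.Index(['a', 'b']))
True
>>> pd.Index(['a', 'b']).equals(pd.Index(['b', 'a']))
False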
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
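Examples
--------
Illustrative usage on a sorted integer Index: the most recent label at
or below the key is returned, and NaN when the key precedes the index:
>>> idx = pd.Index([10, 20, 30])
>>> idx.asof(25)
20
>>> idx.asof(5)
nan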
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
pandas.Series.sort_values : Sort values of a Series.
pandas.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
shifted index
See Also
--------
Series.shift : Shift values of Series.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self).__name__))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name, meaning
we take the non-None name, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects and sorts if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
# TODO: is_dtype_union_equal is a hack around
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workaround can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.unique1d(
Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
_index_shared_docs['get_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Series, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(series)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
--------
pandas.MultiIndex.get_level_values : get values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
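Examples
--------
Illustrative usage on a two-level MultiIndex; dropping one level leaves
a regular Index:
>>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['x', 'y'])
>>> mi.droplevel('x')
Index(['a', 'b'], dtype='object', name='y')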
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._ndarray_values)
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._ndarray_values,
indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also maps not-found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
if is_categorical(target):
target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return _ensure_platform_int(indexer), missing
def get_indexer_for(self, target, **kwargs):
"""
guaranteed return of an indexer even when non-unique
This dispatches to get_indexer or get_indexer_nonunique as appropriate
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
        # than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
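    # Usage sketch (illustrative values only, not part of the pandas source):
    # grouping index labels by a parallel array of keys returns a dict of
    # key -> Index of labels, e.g.
    #   pd.Index(['a', 'b', 'c', 'd']).groupby(['x', 'y', 'x', 'y'])
    #   -> {'x': Index(['a', 'c'], ...), 'y': Index(['b', 'd'], ...)}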
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get('name'):
names = [attributes.get('name')] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['copy'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
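    # Usage sketch (illustrative values only, not part of the pandas source):
    # mapping with a callable returns a new Index of the transformed values, e.g.
    #   pd.Index([1, 2, 3]).map(lambda x: x * 10) -> Int64Index([10, 20, 30], dtype='int64')
    # while a mapper returning tuples yields a MultiIndex, as described above.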
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = com._not_none(*self.names)
other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self._ndarray_values],
[other._ndarray_values],
how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._ndarray_values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
returns sorter for the inner most level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._ndarray_values
ov = other._ndarray_values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
---------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
        if is_float(key) and not self.is_floating():
            try:
                ckey = int(key)
                if ckey == key:
                    key = ckey
            except (OverflowError, ValueError, TypeError):
                pass
        return key
#Import all necessary libraries
#Data File Libraries
import csv
import pandas as pd
import glob
import os
#Math Function Libraries
import math
import statistics
#3D Graphing Libraries
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
#Define Necessary Functions
#Define function to calculate mode
#Returns the mode of the inputted data
def calculate_mode(input_list):
count_dict = {}
found_mode = []
max_count = 0
for value in input_list:
if value not in count_dict:
count_dict[value] = 0
count_dict[value] += 1
if count_dict[value] > max_count:
max_count = count_dict[value]
for value, count in count_dict.items():
if count == max_count:
found_mode = value
return found_mode, max_count
#Define function to calculate mean
#Returns the mean of the inputted data
def calculate_mean(input_list):
    total = 0
    count = 0
    #Count only values that are not NaN so missing entries do not dilute the mean
    for value in input_list:
        if not math.isnan(value):
            total += value
            count += 1
    found_mean = total / count
    return found_mean
#Define function to calculate median
#Returns the median of the inputted data
def calculate_median(input_list):
found_median = statistics.median_low(input_list)
return found_median
#Define function to calculate range
#Returns the range of the inputted data
def calculate_range(input_list):
sorted_input_list = sorted(input_list)
lowest_value = sorted_input_list[0]
highest_value = sorted_input_list[-1]
found_range = highest_value - lowest_value
return found_range
#Define function to perform all calculations at once
#Returns final values from above 4 functions
def calculation_processor(input_list):
found_mode, max_count = calculate_mode(input_list)
found_mean = calculate_mean(input_list)
found_median = calculate_median(input_list)
found_range = calculate_range(input_list)
return found_mode, found_mean, found_median, found_range
#Define function to present processed data
#Returns processed data in easy-to-read manner
def data_return(found_mode, found_mean, found_median, found_range, data_metric, data_file):
print("\nYou analyzed the metric {data_metric} from the file {data_file}.".format(data_metric = data_metric, data_file = data_file))
print("\nThe mode was {found_mode}".format(found_mode = found_mode))
print("\nThe mean was {found_mean}".format(found_mean = found_mean))
print("\nThe median was {found_median}".format(found_median = found_median))
print("\nThe range was {found_range}".format(found_range = found_range))
#Define function to gather a list for a specific metric from all files in a folder (ASK ABOUT GLOB + PANDAS)
#Returns a list that serves as input for future functions
def multiple_file_panda_read(data_folder, data_metric):
input_list = []
os.chdir("/" + data_folder)
filenames = [i for i in glob.glob('*.csv')]
df_collection = (pd.read_csv(f) for f in filenames)
concatenated_df = pd.concat(df_collection, ignore_index = True, sort = True)
input_list = concatenated_df[data_metric]
return input_list
#Define function to gather a list for a specific metric from a single file
#Returns a list that serves as input for future functions
def single_file_panda_read(data_folder, data_file, data_metric):
file_storage_value = ''
input_list = []
os.chdir("/" + data_folder)
filenames = [i for i in glob.glob('*.csv')]
if data_file in filenames:
file_storage_value = data_file
df = pd.read_csv(file_storage_value)
input_list = df[data_metric]
return input_list
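#Illustrative sketch (not part of the original script)
#Reads one metric from a single file and reports the summary statistics; the folder, file and metric names are placeholders
def example_single_file_analysis(data_folder="data", data_file="run1.csv", data_metric="speed"):
    input_list = single_file_panda_read(data_folder, data_file, data_metric)
    found_mode, found_mean, found_median, found_range = calculation_processor(input_list)
    data_return(found_mode, found_mean, found_median, found_range, data_metric, data_file)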
#Define function to return a plot of the XYZ scatter plot graph
#Returns a 3D scatter plot graph
def X_Y_Z_plot(data_folder, data_file, graph_parameters, pathname, save_graph):
coordinate_dictionary = {}
file_storage_value = ''
os.chdir("/" + data_folder)
filenames = [i for i in glob.glob('*.csv')]
if (data_file == 'all.csv'):
df_collection = (pd.read_csv(f) for f in filenames)
concatenated_df = pd.concat(df_collection, ignore_index=True)
dataframe = concatenated_df
else:
if data_file in filenames:
file_storage_value = data_file
            dataframe = pd.read_csv(file_storage_value)
import biom
from numpy import arange
from pandas import DataFrame
from pandas.testing import assert_frame_equal
import pytest
from qurro._rank_utils import filter_unextreme_features
from qurro.generate import biom_table_to_sparse_df, process_input
from qurro.tests.test_df_utils import get_test_data as get_test_data_2
def get_test_data():
"""Returns a ranks DataFrame and a BIOM table for use in testing."""
feature_ids = ["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8"]
sample_ids = ["S1", "S2", "S3", "S4", "S5"]
ranks = DataFrame(
{
"Rank 0": [1, 2, 3, 4, 5, 6, 7, 8],
"Rank 1": [8, 7, 6, 5, 4, 3, 2, 1],
},
index=feature_ids,
)
# Based on the BIOM docs' example of initialization using a np ndarray --
# http://biom-format.org/documentation/table_objects.html#examples
#
# arange(40) generates a numpy ndarray that just goes from 0 to 39 (i.e.
# contains 40 numbers). We reshape this ndarray to give it a sort of
# "tabular" structure (a 2-D array containing 8 arrays, each with 5
# numbers).
underlying_table_data = arange(40).reshape(8, 5)
# Set the third sample in the data to contain all zeros, except for a
# count for F4 (so we can test what this function does with so-called
# "empty" samples after filtering out F4).
underlying_table_data[:, 2] = 0.0
underlying_table_data[3, 2] = 1.0
# Finally, use the data to create a BIOM table object.
biom_table = biom.Table(underlying_table_data, feature_ids, sample_ids)
# ...And yeah we're actually making it into a Sparse DF because that's what
# I changed filter_unextreme_features() to expect now.
# (TODO: simplify this code in the future?)
output_table = biom_table_to_sparse_df(biom_table)
return output_table, ranks
def test_filtering_basic():
"""Tests the standard behavior of filter_unextreme_features()."""
table, ranks = get_test_data()
filtered_table, filtered_ranks = filter_unextreme_features(table, ranks, 2)
# Check that the appropriate features/samples were filtered out of the
# table. NOTE -- I know this is sloppy code. Would like to fix it in the
# future.
for fid in ["F1", "F2", "F7", "F8"]:
assert fid in filtered_table.index
for fid in ["F3", "F4", "F5", "F6"]:
assert fid not in filtered_table.index
# Check that all samples were preserved.
# (The removal of empty features is done *after*
# filter_unextreme_features() is called in normal Qurro execution, so we
    # should expect all samples -- even empty ones -- to remain here.)
for sid in ["S1", "S2", "S3", "S4", "S5"]:
assert sid in filtered_table.columns
# Check that the appropriate data is left in the table.
assert list(filtered_table.loc["F1"]) == [0, 1, 0, 3, 4]
assert list(filtered_table.loc["F2"]) == [5, 6, 0, 8, 9]
assert list(filtered_table.loc["F7"]) == [30, 31, 0, 33, 34]
assert list(filtered_table.loc["F8"]) == [35, 36, 0, 38, 39]
# Check that the rank filtering worked as expected.
expected_filtered_ranks = DataFrame(
{"Rank 0": [1, 2, 7, 8], "Rank 1": [8, 7, 2, 1]},
index=["F1", "F2", "F7", "F8"],
)
assert_frame_equal(
filtered_ranks, expected_filtered_ranks, check_like=True
)
def test_filtering_large_efc():
"""Tests filter_unextreme_features() when (the extreme feature count * 2)
is greater than or equal to the number of ranked features.
"""
table, ranks = get_test_data()
# The number of ranked features is 8.
filtered_table, filtered_ranks = filter_unextreme_features(table, ranks, 4)
assert_frame_equal(table, filtered_table)
assert_frame_equal(ranks, filtered_ranks)
filtered_table, filtered_ranks = filter_unextreme_features(table, ranks, 8)
    assert_frame_equal(table, filtered_table)
import requests
import pandas as pd
import datetime as dt
import numpy as np
#### dict_keys(['table', 'currency', 'code', 'rates'])
def getCurrencyLow(days, currency, currencyName): # Getting currency below 350 days (used for getting data from larger time periods)
today = dt.datetime.today()
for i in range(days + 1):
last = today - dt.timedelta(days = i)
startDate = today.strftime('%Y-%m-%d')
endDate = last.strftime('%Y-%m-%d')
connect = 'http://api.nbp.pl/api/exchangerates/rates/a/' + currency + '/' + endDate + '/' + startDate
connectResult = requests.get(connect)
response = connectResult.json()
        data = pd.DataFrame(response['rates'], columns=['effectiveDate', 'mid'], index=None)
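# Usage sketch (illustrative arguments; the remainder of this function is not shown above):
#   getCurrencyLow(30, 'eur', 'euro')  # mid EUR/PLN rates covering roughly the last 30 days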
"""Utilities for generating idot files."""
import io
import itertools
from typing import Iterable
from typing import List
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
SOURCE_PLATE = 'source_name'
TARGET_PLATE = 'target_name'
UL = 'Volume [uL]'
SOURCE_WELL_INDEX = 'source_index'
SOURCE_WELL = 'Source Well'
TARGET_WELL = 'Target Well'
LIQUID_NAME = 'Liquid Name'
DRUG = 'drug'
REP = 'rep'
ROWS_96 = tuple('ABCDEFGH')
COLS_96 = tuple(range(1, 13))
MAX_WORKING_VOLUME_UL = 60
HEADING_STRING = '''sep=,
MultiSourceMultiPlate,1.7.2021.1105,<User Name>,02/14/22,11:14 AM'''
SOURCE_TARGET_HEADING_STRING = '''S.100 Plate,{src},,8.00E-05,{plate_type},{target},,Waste Tube
DispenseToWaste=False,DispenseToWasteCycles=3,DispenseToWasteVolume=1e-7,UseDeionisation=True,OptimizationLevel=ReorderAndParallel,WasteErrorHandlingLevel=Ask,SaveLiquids=Always'''
def duplicate_drug_per_plate(rep_df: pd.DataFrame, drug: str,
plates_per_drug_instance: int) -> pd.DataFrame:
"""Converts single drug into multiple instances of drug.
Args:
rep_df: Standard DataFrame containing drug and target well info.
drug: name of drug to make multiple instances of
plates_per_drug_instance: Rate to create instances
Returns:
Dataframe with "drug" replaced by "drug0", "drug1" ... "drugn"
"""
for i, target_plate in enumerate(rep_df[TARGET_PLATE].unique()):
mask = rep_df[DRUG] == drug
mask &= rep_df[TARGET_PLATE] == target_plate
rep_df.loc[mask, DRUG] = f'{drug}{i // plates_per_drug_instance}'
return rep_df
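# Illustrative sketch (not part of the original module): with two target plates and
# plates_per_drug_instance=1, rows of 'drugX' are renamed per plate.
def _example_duplicate_drug_per_plate() -> pd.DataFrame:
    rep_df = pd.DataFrame({
        DRUG: ['drugX', 'drugX'],
        TARGET_PLATE: ['plate_1', 'plate_2'],
    })
    # Expected result: the plate_1 row becomes 'drugX0', the plate_2 row 'drugX1'.
    return duplicate_drug_per_plate(rep_df, 'drugX', plates_per_drug_instance=1)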
def split_target_df_by_plates(target_df: pd.DataFrame, plates_per_split: int
) -> List[pd.DataFrame]:
"""Splits into multiple target_dfs based on plates_per_split."""
df_plates = sorted(target_df[TARGET_PLATE].unique())
target_df_chunks = list()
for i in range(0, len(df_plates), plates_per_split):
# pylint: disable=unused-variable
query_plates = df_plates[i: i + plates_per_split]
# pylint: enable=unused-variable
target_df_chunks.append(
target_df.query(f'{TARGET_PLATE} == @query_plates'))
return target_df_chunks
def zip_plates_and_wells(plates: Sequence[Union[str, int]],
rows: Sequence[Union[str, int]] = ROWS_96,
cols: Sequence[Union[str, int]] = COLS_96
) -> Tuple[Iterable[str], Iterable[str]]:
"""Returns plates and wells ordered by plate, column - row."""
# Orders by plate, column-row. The idot robot grabs one column at a time so
# we want the source wells packed into columns.
product = itertools.product(plates, cols, rows)
plates, wells = zip(*[(f'{p}', f'{r}{c}') for p, c, r in product])
return plates, wells
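# Illustrative sketch (not part of the original module): the ordering produced for a
# tiny one-plate, 2x2 layout; wells are packed one column at a time.
def _example_zip_plates_and_wells():
    plates, wells = zip_plates_and_wells(['P1'], rows=('A', 'B'), cols=(1, 2))
    # plates -> ('P1', 'P1', 'P1', 'P1')
    # wells  -> ('A1', 'B1', 'A2', 'B2')
    return plates, wells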
def sort_96_with_384_wells(target_well_df: pd.DataFrame) -> pd.DataFrame:
"""Sorts 384 well plates by every other row to match 96 well pitch."""
sort_df = target_well_df[TARGET_WELL].str.extract(r'([A-Z])(\d+)')
sort_df.columns = ['row', 'col']
sort_df['even'] = (sort_df.row.apply(ord) - ord('A')) % 2
sort_df['col'] = sort_df['col'].astype(int)
sort_df['plate'] = target_well_df[TARGET_PLATE]
sort_df.sort_values(['plate', 'col', 'even', 'row'], inplace=True)
return target_well_df.loc[sort_df.index].reset_index(drop=True)
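# Illustrative sketch (not part of the original module): rows A and C (even offsets)
# sort ahead of row B within the same column, matching the 96-well pitch on a
# 384-well plate.
def _example_sort_96_with_384_wells() -> pd.DataFrame:
    df = pd.DataFrame({
        TARGET_PLATE: ['T1', 'T1', 'T1', 'T1'],
        TARGET_WELL: ['B1', 'A1', 'A2', 'C1'],
    })
    # Expected well order after sorting: A1, C1, B1, A2.
    return sort_96_with_384_wells(df)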
def make_source_and_target_plates(target_df_list: List[pd.DataFrame],
max_volume_ul: float = MAX_WORKING_VOLUME_UL
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Generates optimized source and target plates.
Args:
target_df_list: A list of standard dataframes.
Each dataframe will have dedicated source plates generated for it.
max_volume_ul: maximum volume of a source well.
Returns:
concatenated source plates, concatenated sorted target plates
"""
source_plate_list = list()
target_sorted_list = list()
starting_source_plate = 1
d_si = [DRUG, SOURCE_WELL_INDEX]
for df in target_df_list:
df_sorted = sort_96_with_384_wells(df)
# If the volume of source drug is larger than maximum well volume, then we
# need to put the drug into multiple wells. SOURCE_WELL_INDEX is used to
# index which well the drug is sourced from.
# Calculate in discrete steps to ensure that there is always enough liquid
# in a source well for each discrete liquid transfer.
source_well_not_declared = -1
df_sorted[SOURCE_WELL_INDEX] = source_well_not_declared
source_index = 0
while True:
undeclared = df_sorted[SOURCE_WELL_INDEX] == source_well_not_declared
no_overflow = df_sorted.groupby(d_si)[UL].cumsum() <= max_volume_ul
mask = no_overflow & undeclared
df_sorted.loc[mask, SOURCE_WELL_INDEX] = source_index
source_index += 1
if not mask.any():
break
source_plate_df = df_sorted.groupby(d_si).sum()
source_plate_df = source_plate_df.loc[
df_sorted.set_index(d_si).index].reset_index()
source_plate_df = source_plate_df.drop_duplicates()
max_src_plates = np.ceil(len(source_plate_df) / 96).astype(int)
src_plate_index = np.arange(
starting_source_plate,
starting_source_plate + max_src_plates + 1).astype(int)
fill = np.floor(1 + np.log10(src_plate_index.max())).astype(int)
src_plate_names = [i.zfill(fill) for i in src_plate_index.astype(str)]
src_plates, src_wells = zip_plates_and_wells(src_plate_names)
source_plate_df[SOURCE_PLATE] = list(src_plates)[:len(source_plate_df)]
source_plate_df[SOURCE_WELL] = list(src_wells)[:len(source_plate_df)]
source_plate_list.append(source_plate_df)
target_sorted_list.append(df_sorted)
starting_source_plate += max_src_plates
    return (pd.concat(source_plate_list, ignore_index=True),
            pd.concat(target_sorted_list, ignore_index=True))
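# Usage sketch (illustrative values; assumes a single standard target frame with the
# DRUG, TARGET_PLATE, TARGET_WELL and UL columns used above):
#   target_df = pd.DataFrame({DRUG: ['drugA', 'drugB'],
#                             TARGET_PLATE: ['T1', 'T1'],
#                             TARGET_WELL: ['A1', 'A2'],
#                             UL: [30.0, 10.0]})
#   source_df, sorted_target_df = make_source_and_target_plates([target_df])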
import concurrent.futures
import logging
import os.path
import tempfile
import time
import zipfile
from pathlib import Path
from typing import Iterable
from typing import List
import boto3
import lib.core as constances
import pandas as pd
import requests
from botocore.exceptions import ClientError
from lib.app.analytics.common import aggregate_image_annotations_as_df
from lib.app.analytics.common import consensus_plot
from lib.app.analytics.common import image_consensus
from lib.core.conditions import Condition
from lib.core.conditions import CONDITION_EQ as EQ
from lib.core.entities import FolderEntity
from lib.core.entities import MLModelEntity
from lib.core.entities import ProjectEntity
from lib.core.enums import ExportStatus
from lib.core.enums import ProjectType
from lib.core.exceptions import AppException
from lib.core.exceptions import AppValidationException
from lib.core.repositories import BaseManageableRepository
from lib.core.repositories import BaseReadOnlyRepository
from lib.core.serviceproviders import SuerannotateServiceProvider
from lib.core.usecases.base import BaseInteractiveUseCase
from lib.core.usecases.base import BaseUseCase
from lib.core.usecases.images import GetBulkImages
logger = logging.getLogger("root")
class PrepareExportUseCase(BaseUseCase):
def __init__(
self,
project: ProjectEntity,
folder_names: List[str],
backend_service_provider: SuerannotateServiceProvider,
include_fuse: bool,
only_pinned: bool,
annotation_statuses: List[str] = None,
):
super().__init__(),
self._project = project
self._folder_names = list(folder_names) if folder_names else None
self._backend_service = backend_service_provider
self._annotation_statuses = annotation_statuses
self._include_fuse = include_fuse
self._only_pinned = only_pinned
def validate_only_pinned(self):
if (
self._project.upload_state == constances.UploadState.EXTERNAL.value
and self._only_pinned
):
raise AppValidationException(
f"Pin functionality is not supported for projects containing {self._project.project_type} attached with URLs"
)
def validate_fuse(self):
if (
self._project.upload_state == constances.UploadState.EXTERNAL.value
and self._include_fuse
):
raise AppValidationException(
f"Include fuse functionality is not supported for projects containing {self._project.project_type} attached with URLs"
)
def execute(self):
if self.is_valid():
if self._project.upload_state == constances.UploadState.EXTERNAL.value:
self._include_fuse = False
if not self._annotation_statuses:
self._annotation_statuses = (
constances.AnnotationStatus.IN_PROGRESS.name,
constances.AnnotationStatus.COMPLETED.name,
constances.AnnotationStatus.QUALITY_CHECK.name,
constances.AnnotationStatus.RETURNED.name,
constances.AnnotationStatus.NOT_STARTED.name,
constances.AnnotationStatus.SKIPPED.name,
)
response = self._backend_service.prepare_export(
project_id=self._project.uuid,
team_id=self._project.team_id,
folders=self._folder_names,
annotation_statuses=self._annotation_statuses,
include_fuse=self._include_fuse,
only_pinned=self._only_pinned,
)
if "error" in response:
raise AppException(response["error"])
report_message = ""
if self._folder_names:
report_message = f"[{', '.join(self._folder_names)}] "
logger.info(
f"Prepared export {response['name']} for project {self._project.name} "
f"{report_message}(project ID {self._project.uuid})."
)
self._response.data = response
return self._response
class GetExportsUseCase(BaseUseCase):
def __init__(
self,
service: SuerannotateServiceProvider,
project: ProjectEntity,
return_metadata: bool = False,
):
super().__init__()
self._service = service
self._project = project
self._return_metadata = return_metadata
def execute(self):
if self.is_valid():
data = self._service.get_exports(
team_id=self._project.team_id, project_id=self._project.uuid
)
self._response.data = data
if not self._return_metadata:
self._response.data = [i["name"] for i in data]
return self._response
class CreateModelUseCase(BaseUseCase):
def __init__(
self,
base_model_name: str,
model_name: str,
model_description: str,
task: str,
team_id: int,
train_data_paths: Iterable[str],
test_data_paths: Iterable[str],
backend_service_provider: SuerannotateServiceProvider,
projects: BaseReadOnlyRepository,
folders: BaseReadOnlyRepository,
ml_models: BaseManageableRepository,
hyper_parameters: dict = None,
):
super().__init__()
self._base_model_name = base_model_name
self._model_name = model_name
self._model_description = model_description
self._task = task
self._team_id = team_id
self._hyper_parameters = hyper_parameters
self._train_data_paths = train_data_paths
self._test_data_paths = test_data_paths
self._backend_service = backend_service_provider
self._ml_models = ml_models
self._projects = projects
self._folders = folders
@property
def hyper_parameters(self):
if self._hyper_parameters:
for parameter in constances.DEFAULT_HYPER_PARAMETERS:
if parameter not in self._hyper_parameters:
self._hyper_parameters[
parameter
] = constances.DEFAULT_HYPER_PARAMETERS[parameter]
else:
self._hyper_parameters = constances.DEFAULT_HYPER_PARAMETERS
return self._hyper_parameters
@staticmethod
def split_path(path: str):
if "/" in path:
return path.split("/")
return path, "root"
def execute(self):
train_folder_ids = []
test_folder_ids = []
projects = []
for path in self._train_data_paths:
project_name, folder_name = self.split_path(path)
            found_projects = self._projects.get_all(
                Condition("name", project_name, EQ)
                & Condition("team_id", self._team_id, EQ)
            )
            projects.extend(found_projects)
            folders = self._folders.get_all(
                Condition("name", folder_name, EQ)
                & Condition("team_id", self._team_id, EQ)
                & Condition("project_id", found_projects[0].uuid, EQ)
)
train_folder_ids.append(folders[0].uuid)
for path in self._test_data_paths:
project_name, folder_name = self.split_path(path)
            found_projects = self._projects.get_all(
                Condition("name", project_name, EQ)
                & Condition("team_id", self._team_id, EQ)
            )
            projects.extend(found_projects)
            folders = self._folders.get_all(
                Condition("name", folder_name, EQ)
                & Condition("team_id", self._team_id, EQ)
                & Condition("project_id", found_projects[0].uuid, EQ)
)
test_folder_ids.append(folders[0].uuid)
project_types = [project.project_type for project in projects]
if set(train_folder_ids) & set(test_folder_ids):
self._response.errors = AppException(
"Avoid overlapping between training and test data."
)
return
if len(set(project_types)) != 1:
self._response.errors = AppException(
"All projects have to be of the same type. Either vector or pixel"
)
return
if any(
{
True
for project in projects
if project.upload_state == constances.UploadState.EXTERNAL.value
}
):
self._response.errors = AppException(
"The function does not support projects containing images attached with URLs"
)
return
base_model = self._ml_models.get_all(
Condition("name", self._base_model_name, EQ)
& Condition("team_id", self._team_id, EQ)
& Condition("task", constances.MODEL_TRAINING_TASKS[self._task], EQ)
& Condition("type", project_types[0], EQ)
& Condition("include_global", True, EQ)
)[0]
if base_model.model_type != project_types[0]:
self._response.errors = AppException(
f"The type of provided projects is {project_types[0]}, "
"and does not correspond to the type of provided model"
)
return self._response
completed_images_data = self._backend_service.bulk_get_folders(
self._team_id, [project.uuid for project in projects]
)
complete_image_count = sum(
[
folder["completedCount"]
for folder in completed_images_data["data"]
if folder["id"] in train_folder_ids
]
)
ml_model = MLModelEntity(
name=self._model_name,
description=self._model_description,
task=constances.MODEL_TRAINING_TASKS[self._task],
base_model_id=base_model.uuid,
image_count=complete_image_count,
model_type=project_types[0],
train_folder_ids=train_folder_ids,
test_folder_ids=test_folder_ids,
hyper_parameters=self.hyper_parameters,
)
new_model_data = self._ml_models.insert(ml_model)
self._response.data = new_model_data
return self._response
class GetModelMetricsUseCase(BaseUseCase):
def __init__(
self,
model_id: int,
team_id: int,
backend_service_provider: SuerannotateServiceProvider,
):
super().__init__()
self._model_id = model_id
self._team_id = team_id
self._backend_service = backend_service_provider
def execute(self):
metrics = self._backend_service.get_model_metrics(
team_id=self._team_id, model_id=self._model_id
)
self._response.data = metrics
return self._response
class UpdateModelUseCase(BaseUseCase):
def __init__(
self, model: MLModelEntity, models: BaseManageableRepository,
):
super().__init__()
self._models = models
self._model = model
def execute(self):
model = self._models.update(self._model)
self._response.data = model
return self._response
class DeleteMLModel(BaseUseCase):
def __init__(self, model_id: int, models: BaseManageableRepository):
super().__init__()
self._model_id = model_id
self._models = models
def execute(self):
self._response.data = self._models.delete(self._model_id)
return self._response
class StopModelTraining(BaseUseCase):
def __init__(
self,
model_id: int,
team_id: int,
backend_service_provider: SuerannotateServiceProvider,
):
super().__init__()
self._model_id = model_id
self._team_id = team_id
self._backend_service = backend_service_provider
def execute(self):
is_stopped = self._backend_service.stop_model_training(
self._team_id, self._model_id
)
if not is_stopped:
self._response.errors = AppException("Something went wrong.")
return self._response
class DownloadExportUseCase(BaseInteractiveUseCase):
def __init__(
self,
service: SuerannotateServiceProvider,
project: ProjectEntity,
export_name: str,
folder_path: str,
extract_zip_contents: bool,
to_s3_bucket: bool,
):
super().__init__()
self._service = service
self._project = project
self._export_name = export_name
self._folder_path = folder_path
self._extract_zip_contents = extract_zip_contents
self._to_s3_bucket = to_s3_bucket
self._temp_dir = None
def upload_to_s3_from_folder(self, folder_path: str):
to_s3_bucket = boto3.Session().resource("s3").Bucket(self._to_s3_bucket)
files_to_upload = list(Path(folder_path).rglob("*.*"))
def _upload_file_to_s3(_to_s3_bucket, _path, _s3_key) -> None:
_to_s3_bucket.upload_file(_path, _s3_key)
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
results = []
for path in files_to_upload:
s3_key = f"{self._folder_path}/{path.name}"
results.append(
executor.submit(_upload_file_to_s3, to_s3_bucket, str(path), s3_key)
)
yield
def download_to_local_storage(self, destination: str):
exports = self._service.get_exports(
team_id=self._project.team_id, project_id=self._project.uuid
)
export = next(filter(lambda i: i["name"] == self._export_name, exports), None)
export = self._service.get_export(
team_id=self._project.team_id,
project_id=self._project.uuid,
export_id=export["id"],
)
if not export:
raise AppException("Export not found.")
export_status = export["status"]
while export_status != ExportStatus.COMPLETE.value:
logger.info("Waiting 5 seconds for export to finish on server.")
time.sleep(5)
export = self._service.get_export(
team_id=self._project.team_id,
project_id=self._project.uuid,
export_id=export["id"],
)
if "error" in export:
raise AppException(export["error"])
export_status = export["status"]
if export_status in (ExportStatus.ERROR.value, ExportStatus.CANCELED.value):
raise AppException("Couldn't download export.")
filename = Path(export["path"]).name
filepath = Path(destination) / filename
with requests.get(export["download"], stream=True) as response:
response.raise_for_status()
with open(filepath, "wb") as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
if self._extract_zip_contents:
with zipfile.ZipFile(filepath, "r") as f:
f.extractall(destination)
Path.unlink(filepath)
return export["id"], filepath, destination
def get_upload_files_count(self):
if not self._temp_dir:
self._temp_dir = tempfile.TemporaryDirectory()
self.download_to_local_storage(self._temp_dir.name)
return len(list(Path(self._temp_dir.name).rglob("*.*")))
def execute(self):
if self.is_valid():
report = []
if self._to_s3_bucket:
self.get_upload_files_count()
yield from self.upload_to_s3_from_folder(self._temp_dir.name)
report.append(
f"Exported to AWS {self._to_s3_bucket}/{self._folder_path}"
)
self._temp_dir.cleanup()
else:
export_id, filepath, destination = self.download_to_local_storage(
self._folder_path
)
if self._extract_zip_contents:
report.append(f"Extracted {filepath} to folder {destination}")
else:
report.append(f"Downloaded export ID {export_id} to {filepath}")
yield
self._response.data = "\n".join(report)
return self._response
class DownloadMLModelUseCase(BaseUseCase):
def __init__(
self,
model: MLModelEntity,
download_path: str,
backend_service_provider: SuerannotateServiceProvider,
team_id: int,
):
super().__init__()
self._model = model
self._download_path = download_path
self._backend_service = backend_service_provider
self._team_id = team_id
def validate_training_status(self):
if self._model.training_status not in [
constances.TrainingStatus.COMPLETED.value,
constances.TrainingStatus.FAILED_AFTER_EVALUATION_WITH_SAVE_MODEL.value,
]:
raise AppException("Unable to download.")
def execute(self):
if self.is_valid():
metrics_name = os.path.basename(self._model.path).replace(".pth", ".json")
mapper_path = self._model.config_path.replace(
os.path.basename(self._model.config_path), "classes_mapper.json"
)
metrics_path = self._model.config_path.replace(
os.path.basename(self._model.config_path), metrics_name
)
auth_response = self._backend_service.get_ml_model_download_tokens(
self._team_id, self._model.uuid
)
if not auth_response.ok:
raise AppException(auth_response.error)
s3_session = boto3.Session(
aws_access_key_id=auth_response.data.access_key,
aws_secret_access_key=auth_response.data.secret_key,
aws_session_token=auth_response.data.session_token,
region_name=auth_response.data.region,
)
bucket = s3_session.resource("s3").Bucket(auth_response.data.bucket)
bucket.download_file(
self._model.config_path,
os.path.join(self._download_path, "config.yaml"),
)
bucket.download_file(
self._model.path,
os.path.join(self._download_path, os.path.basename(self._model.path)),
)
try:
bucket.download_file(
metrics_path, os.path.join(self._download_path, metrics_name)
)
bucket.download_file(
mapper_path,
os.path.join(self._download_path, "classes_mapper.json"),
)
except ClientError:
logger.info(
"The specified model does not contain a classes_mapper and/or a metrics file."
)
self._response.data = self._model
return self._response
class BenchmarkUseCase(BaseUseCase):
def __init__(
self,
project: ProjectEntity,
ground_truth_folder_name: str,
folder_names: list,
export_dir: str,
image_list: list,
annotation_type: str,
show_plots: bool,
):
super().__init__()
self._project = project
self._ground_truth_folder_name = ground_truth_folder_name
self._folder_names = folder_names
self._export_dir = export_dir
self._image_list = image_list
self._annotation_type = annotation_type
self._show_plots = show_plots
def execute(self):
project_df = aggregate_image_annotations_as_df(self._export_dir)
gt_project_df = project_df[
project_df["folderName"] == self._ground_truth_folder_name
]
benchmark_dfs = []
for folder_name in self._folder_names:
folder_df = project_df[project_df["folderName"] == folder_name]
project_gt_df = pd.concat([folder_df, gt_project_df])
project_gt_df = project_gt_df[project_gt_df["instanceId"].notna()]
if self._image_list is not None:
project_gt_df = project_gt_df.loc[
project_gt_df["imageName"].isin(self._image_list)
]
project_gt_df.query("type == '" + self._annotation_type + "'", inplace=True)
project_gt_df = project_gt_df.groupby(
["imageName", "instanceId", "folderName"]
)
def aggregate_attributes(instance_df):
def attribute_to_list(attribute_df):
attribute_names = list(attribute_df["attributeName"])
attribute_df["attributeNames"] = len(attribute_df) * [
attribute_names
]
return attribute_df
attributes = None
if not instance_df["attributeGroupName"].isna().all():
attrib_group_name = instance_df.groupby("attributeGroupName")[
["attributeGroupName", "attributeName"]
].apply(attribute_to_list)
attributes = dict(
zip(
attrib_group_name["attributeGroupName"],
attrib_group_name["attributeNames"],
)
)
instance_df.drop(
["attributeGroupName", "attributeName"], axis=1, inplace=True
)
instance_df.drop_duplicates(
subset=["imageName", "instanceId", "folderName"], inplace=True
)
instance_df["attributes"] = [attributes]
return instance_df
project_gt_df = project_gt_df.apply(aggregate_attributes).reset_index(
drop=True
)
unique_images = set(project_gt_df["imageName"])
all_benchmark_data = []
for image_name in unique_images:
image_data = image_consensus(
project_gt_df, image_name, self._annotation_type
)
                all_benchmark_data.append(pd.DataFrame(image_data))
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
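        # (codes below -1 are invalid; -1 itself is reserved for NaN)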
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
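        # comparing with a value that is not among the categories is never equal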
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparisons with numpy.array will raise in both directions, but
        # only on newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
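        # fastpath=True treats the labels as ready-made codes, so -1 marks NaN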
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
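        # NaN gets its own row in the result even though it is not a category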
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        # all "pointers" to '4' must be changed from 3 to 0, ...
        c = c.set_categories([4, 3, 2, 1])
        # positions are changed
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
        # categories are now in new order
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
        # output is the same
        self.assert_numpy_array_equal(c.get_values(),
                                      np.array([1, 2, 3, 4, 1]))
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
        # set_categories should pass through the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
        # nan stays in the unique values but must be removed from the
        # categories
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
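        # code 3 points at the NaN category; -1 marks the originally missing value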
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
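        # nbytes is the codes buffer plus the categories' values buffer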
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
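        # with object categories, deep introspection counts the strings too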
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
        # Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
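        # (sorter=[0, 1, 2, 3, 5, 4] presents 'donuts' before 'milk')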
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
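        # the codes dtype is the smallest int type that fits the categories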
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
        # doing this breaks transform
        x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
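        # each Categorical in the list becomes its own category-dtype column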
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
        # reindexing onto index labels that are not present gives all-NaN
        # values but keeps the original categories
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
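        # copy=True gives the Series its own Categorical, detached from 'cat'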
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
        # Series should delegate calls to '.categories', '.codes', '.ordered'
        # and the methods '.set_categories()' and
        # '.remove_unused_categories()' to the categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
        # Calling set_categories on the Series itself (instead of via the
        # .cat accessor) is a likely mistake, so test that it raises an error
        # on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = | pd.period_range('2011-01-01 09:00', freq='H', periods=5) | pandas.period_range |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 15:33:07 2018
@author: ysye
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import minmax_scale
from math import log
from sklearn.metrics import roc_curve, auc
from scipy import stats
from matplotlib.colors import ListedColormap
from scipy.spatial import distance
#os.chdir('E:/Users/yusen/Project/Project3/Python code/CICRLET_package/src/CIRCLET')
from . import CIRCLET_DEFINE
from . import CIRCLET_CORE
bcolors_3=['#EF4F50','#587FBF','#CCCCCC']
bcolors_6=['#587FBF','#3FA667','#EF4F50','#FFAAA3','#414C50','#D3D3D3']
bcolors_12=['#CC1B62','#FBBC00','#0E8934','#AC1120','#EA7B00','#007AB7',
'#9A35B4','#804E1F' ,'#BEAB81','#D32414','#75AB09','#004084']
def change_index(passed_qc_sc_DF_cond,soft_add,software,UBI=['1CDU', '1CD_G1', '1CD_eS', '1CD_mS', '1CD_lS_G2']):
"""
    which measures how frequently the experimentally determined single-cell labels
    change along the inferred time series.
"""
#read order of single cell
if (software=='wishbone') | (software=='CIRCLET'):
phenotime=pd.read_table(soft_add,header=None,index_col=0)
phenotime.columns=['Pseudotime']
phenotime['cond']=passed_qc_sc_DF_cond
ordIndex=phenotime.sort_values(by='Pseudotime')
        cond_order=[cond for cond in ordIndex['cond'] if cond in UBI]
elif software=='multi-metric':
passed_qc_sc_DF=pd.read_table(soft_add,header=0,index_col=0)
phenotime=passed_qc_sc_DF[['ord','cond']]
ordIndex=phenotime.sort_values(by='ord')
cond_order=[cond for cond in ordIndex['cond'] if cond in UBI]
#generate penalty table
penal_table=np.ones((len(UBI),len(UBI)))
for loc in range(len(UBI)):
penal_table[loc,loc]=0
#if loc==0:
# penal_table[loc,1]=0
# penal_table[loc,2]=0
penal_table=(np.triu(penal_table)+np.triu(penal_table).T)
penalty_sum=0
sc_number=len(cond_order)
for k,cond in enumerate(cond_order):
phase1=UBI.index(cond)
phase2=UBI.index(cond_order[(k+1)%sc_number])
penalty_sum+=penal_table[phase1,phase2]
change_score=1-(penalty_sum-4)/(sc_number-4)
return change_score
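# Illustrative sketch (not part of the original pipeline): with an all-ones penalty table,
# the label-change score above reduces to counting phase switches between consecutive cells
# in the cyclic ordering. The helper name and toy phase labels below are hypothetical.
def _demo_label_change_score(cond_order, UBI=('1CD_G1', '1CD_eS', '1CD_mS', '1CD_lS_G2')):
    switches = 0
    n = len(cond_order)
    for k, cond in enumerate(cond_order):
        nxt = cond_order[(k + 1) % n]
        if UBI.index(cond) != UBI.index(nxt):
            switches += 1
    return 1 - (switches - 4) / (n - 4)

# e.g. _demo_label_change_score(['1CD_G1']*5 + ['1CD_eS']*5 + ['1CD_mS']*5 + ['1CD_lS_G2']*5)
# gives 1.0 (only the four phase boundaries switch), while a shuffled ordering gives a much
# lower value.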
def evaluate_continue_change(con_distri_features,soft_add,software):
#roc_auc_DF=evaluation_ranks(passed_qc_sc_DF_cond,soft_add,software,UBI=UBIs[1:5])
#plot_evaluate_heat(passed_qc_sc_DF_RO,soft_add,con_distri_features,software,UBIs)
if software=='multi-metric':
passed_qc_sc_DF=pd.read_table(soft_add,header=0,index_col=0)
phenotime=passed_qc_sc_DF[['ord']]
elif (software=='wishbone') | (software=='CIRCLET'):
phenotime=pd.read_table(soft_add,header=None,index_col=0)
phenotime.columns=['Pseudotime']
ordIndex=phenotime.sort_values(by='Pseudotime')
old_sc_name=ordIndex.index[-1]
sc_name=ordIndex.index[0]
corr_list=list()
for sc_name in ordIndex.index:
x=con_distri_features.loc[old_sc_name]
y=con_distri_features.loc[sc_name]
old_sc_name=sc_name
#temp=stats.pearsonr(x,y)[0]
#temp=distance.cosine(x,y)
#temp=np.abs(distance.cosine(x,y)-1)
temp=np.abs(distance.correlation(x,y)-1)
corr_list.append(temp)
evaluation_value=np.mean(corr_list)
#print(evaluation_value)
return evaluation_value
def computing_AUC(Rank_list):
"""
    Compute the ROC AUC of the pseudotime ranking against the binary 'bench' labels
    (folded so that the reported value is always >= 0.5).
"""
y_true=Rank_list['bench']
y_score=np.max(Rank_list['Pseudotime'])-Rank_list['Pseudotime']
fpr,tpr,threshold = roc_curve(y_true,y_score)
roc_auc = auc(fpr, tpr)
if roc_auc<0.5:
roc_auc=1-roc_auc
#plt.plot(fpr,tpr)
return roc_auc
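# Hedged usage sketch (toy numbers, illustrative only): computing_AUC expects a DataFrame
# with a 'Pseudotime' column and a binary 'bench' column; the score is inverted so that
# earlier pseudotimes rank higher.
def _demo_computing_AUC():
    toy = pd.DataFrame({'Pseudotime': [0.05, 0.10, 0.20, 0.70, 0.80, 0.90],
                        'bench':      [1,    1,    1,    0,    0,    0]})
    return computing_AUC(toy)   # -> 1.0 for this perfectly separated toy ranking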
#soft_con_distri_Res_add=soft_add
def evaluation_ranks(passed_qc_sc_DF_cond,soft_con_distri_Res_add,software,UBI,key='not'):
"""
    Calculate an AUC value for each pair of adjacent UBI phases from the inferred
    ranking, giving the distribution of AUC values along the cell cycle.
"""
#UsingBatchIDs=['1CDU', '1CDX1', '1CDX2', '1CDX3', '1CDX4', '1CDES']
#UBIs=['1CDU', '1CD_G1', '1CD_eS', '1CD_mS', '1CD_lS_G2', 'NoSort']
if software=='multi-metric':
passed_qc_sc_DF=pd.read_table(soft_con_distri_Res_add,header=0,index_col=0)
MM_phenotime=passed_qc_sc_DF[['ord','cond']]
MM_phenotime.columns=['Pseudotime','cond']
ordIndex_soft=MM_phenotime.sort_values(by='Pseudotime')
elif (software=='wishbone') | (software=='CIRCLET'):
wishbone_phenotime=pd.read_table(soft_con_distri_Res_add,header=None,index_col=0)
wishbone_phenotime.columns=['Pseudotime']
wishbone_phenotime['cond']=passed_qc_sc_DF_cond[wishbone_phenotime.index].values
ordIndex_soft=wishbone_phenotime.sort_values(by='Pseudotime')
#fig,ax=plt.subplots()
roc_auc_DF=list()
#k=1
for k, UB1 in enumerate(UBI):
UB1=UBI[k]
UB2=UBI[(k+1)%len(UBI)]
Rank_list=ordIndex_soft.loc[(ordIndex_soft['cond']==UB1) | (ordIndex_soft['cond']==UB2)]
Rank_list['bench']=0
Rank_list.loc[Rank_list['cond']==UB1,'bench']=1
cell1=Rank_list.index[0:10]
cell2=ordIndex_soft.index[0:10]
cell3=Rank_list.index[-10:]
cell4=ordIndex_soft.index[-10:]
if ((len(cell1.intersection(cell2))>3) & (len(cell3.intersection(cell4))>3) & (key=='acc')):
roc_auc=0
cell_UB1=(Rank_list['cond']==UB1).index
for cell in cell_UB1:
#cell=Rank_list.index[k]
X=Rank_list.loc[ cell,'Pseudotime']
Rank_list['Pseudotime']=(Rank_list['Pseudotime']-X+1.0)%1
new_roc_auc=computing_AUC(Rank_list)
roc_auc=np.max([roc_auc,new_roc_auc])
print(roc_auc)
else:
roc_auc=computing_AUC(Rank_list)
roc_auc_DF.append(roc_auc)
roc_auc_DF=pd.DataFrame(roc_auc_DF,index=UBI)
return roc_auc_DF
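# Hedged example of the adjacent-phase pairs scored above: with
# UBI = ['1CD_G1', '1CD_eS', '1CD_mS', '1CD_lS_G2'] the loop evaluates the pairs
# (G1, eS), (eS, mS), (mS, lS_G2) and, cyclically, (lS_G2, G1), producing one AUC value
# per phase boundary of the cell cycle.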
def plot_evaluate_heat(passed_qc_sc_DF_RO,soft_con_distri_Res_add,software,UBI):
"""
    Plot an evaluation heatmap of the inferred cell ordering, coloured by experimental phase.
"""
ordIndex_Nature=passed_qc_sc_DF_RO['ord']
if software=='multi-metric':
#passed_qc_sc_DF=pd.read_table(soft_con_distri_Res_add,header=0,index_col=0)
#ordIndex_soft=(passed_qc_sc_DF['ord'].T.values-1)
cycle_cells_v2=np.zeros(len(ordIndex_Nature),dtype=int)
for i,order in enumerate(ordIndex_Nature):
cycle_cells_v2[int(order)-1]=i
ordIndex_soft=cycle_cells_v2
elif (software=='wishbone') | (software=='CIRCLET'):
wishbone_phenotime=pd.read_table(soft_con_distri_Res_add,header=None)
wishbone_phenotime.columns=['cellnames','Pseudotime']
ordIndex_soft=wishbone_phenotime.sort_values(by='Pseudotime').index
Fluo_comp=np.zeros((len(UBI),len(ordIndex_soft)))
for j,rank in enumerate(ordIndex_soft):
sc_cell=passed_qc_sc_DF_RO.index[rank]
i=UBI.index(passed_qc_sc_DF_RO.loc[sc_cell,'cond'])
#i=UsingBatchIDs.index(sc_cell.split("_")[0])
Fluo_comp[i,j]=i+1
fig,ax=plt.subplots(figsize=(4,2.5))
#cmap=["#B4F8FF","#2FD4E6","#992F71","#E61898","#99862F","#E6C018","#0FFF01"]
cmap=["#EFEFEF","#EF4F50","#3FA667","#587FBF","#FFAAA3"]
#my_cmap = ListedColormap(sns.color_palette(flatui).as_hex())
#my_cmap = ListedColormap(cmap.as_hex())
my_cmap = ListedColormap(cmap)
sns.heatmap(Fluo_comp,xticklabels=False, yticklabels=False,cmap=my_cmap,ax=ax)
#plt.title(soft_con_distri_Res_add.split('/')[-1].rstrip('.txt'))
#soft_add=Result_file
def evaluate_software(passed_qc_sc_DF_RO,data_type,soft_add,software,UBI,type_names,key='not'):
#passed_qc_sc_DF_cond=passed_qc_sc_DF_RO['cond']
roc_auc_DF=evaluation_ranks(data_type,soft_add,software,UBI,key)
change_score=change_index(data_type,soft_add,software,UBI)
plot_evaluate_heat(passed_qc_sc_DF_RO,soft_add,software,UBI)
roc_auc_DF.index=[ 'AUC:'+phase+'-'+type_names[(m+1)%len(type_names)] for m,phase in enumerate(type_names)]
CS=pd.Series(change_score,index=['LCS'])
evaluation=pd.concat([roc_auc_DF,CS])
return evaluation
def evaluate_natrue(passed_qc_sc_DF_RO,index,UBIs,UsingBatchIDs,key='not'):
#passed_qc_sc_DF_cond=passed_qc_sc_DF_RO['cond']
soft_add="./data_RO/passed_qc_sc_DF_RO.txt"
software='multi-metric'
    Nature_evaluation=evaluate_software(passed_qc_sc_DF_RO,passed_qc_sc_DF_RO['cond'],soft_add,software,UBIs,UsingBatchIDs,key)
for value in Nature_evaluation.values:
print("%.4f" % value)
return Nature_evaluation
from sklearn.preprocessing import minmax_scale
#feature_files=sub_feature_files
def merge_features(feature_files,HiC_dir,index,Isminmax=True):
#print(sub_feature_files,DS)
temp_data=pd.DataFrame()
#filename=sub_feature_files[0]
for k,filename in enumerate(feature_files):
feature_data=pd.read_table(HiC_dir+'/'+filename, sep='\t', header=0,index_col=0)
if np.isnan(np.max(feature_data.values)):
feature_data=feature_data.fillna(0)
if Isminmax:
fea_data= minmax_scale(feature_data,feature_range=(0.01,1),axis=0,copy=True)
feature_data= | pd.DataFrame(fea_data,columns=feature_data.columns,index=feature_data.index) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
import numpy as np
import pandas as pd
import utils
import time
from collections import deque, defaultdict
from scipy.spatial.distance import cosine
from scipy import stats
import math
seed = SEED
cur_stage = CUR_STAGE
mode = cur_mode
#used_recall_source = 'i2i_w02-b2b-i2i2i'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b'
used_recall_source = cur_used_recall_source
sum_mode = 'nosum'
used_recall_source = used_recall_source+'-'+sum_mode
print( f'Recall Source Use {used_recall_source}')
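# Hedged example (assumes cur_used_recall_source equals the string in the comment above):
#   used_recall_source == 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b' + '-' + 'nosum'
#                      == 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b-nosum'
# The tag is only used below to build per-recall-source cache paths such as
#   feat_dir + f'{used_recall_source}/feat_i2i_seq_{mode}_{cur_stage}.pkl'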
def feat_item_sum_mean_sim_weight_loc_weight_time_weight_rank_weight(data):
df = data.copy()
df = df[ ['user','item','sim_weight','loc_weight','time_weight','rank_weight','index'] ]
feat = df[ ['index','user','item'] ]
df = df.groupby( ['user','item'] )[ ['sim_weight','loc_weight','time_weight','rank_weight'] ].agg( ['sum','mean'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['sim_weight','loc_weight','time_weight','rank_weight'] for j in ['sum','mean'] ]
df.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
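# Minimal sketch of the groupby/agg/merge pattern used above (toy values, illustrative only):
def _demo_sum_mean_agg():
    toy = pd.DataFrame({'user': [1, 1, 2], 'item': [10, 10, 20],
                        'sim_weight': [0.5, 0.3, 0.9]})
    agg = toy.groupby(['user', 'item'])[['sim_weight']].agg(['sum', 'mean']).reset_index()
    agg.columns = ['user', 'item', 'item_sum_sim_weight', 'item_mean_sim_weight']
    # the (user=1, item=10) group gets sum=0.8 and mean=0.4 broadcast back to both of its rows
    return pd.merge(toy, agg, on=['user', 'item'], how='left')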
def feat_sum_sim_loc_time_weight(data):
df = data.copy()
df = df[ ['index','sim_weight','loc_weight','time_weight'] ]
feat = df[ ['index'] ]
feat['sum_sim_loc_time_weight'] = df['sim_weight'] + df['loc_weight'] + df['time_weight']
feat = feat[ ['sum_sim_loc_time_weight'] ]
return feat
def feat_road_item_text_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_text_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_cossim'] ]
return feat
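# Worked numeric check of the cosine formula above (vectors are made up):
def _demo_text_cossim():
    v1 = np.array([1.0, 0.0, 1.0])
    v2 = np.array([1.0, 1.0, 0.0])
    c = np.dot(v1, v2)            # 1.0
    a = np.linalg.norm(v1)        # sqrt(2)
    b = np.linalg.norm(v2)        # sqrt(2)
    return c / (a * b + 1e-9)     # ~0.5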
def feat_road_item_text_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text )
return a
else:
return np.nan
feat['road_item_text_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_eulasim'] ]
return feat
def feat_road_item_text_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text, ord=1 )
return a
else:
return np.nan
feat['road_item_text_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_mansim'] ]
return feat
def feat_road_item_image_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
c = np.dot( item1_image, item2_image )
a = np.linalg.norm( item1_image )
b = np.linalg.norm( item2_image )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_image_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_cossim'] ]
return feat
def feat_road_item_image_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image )
return a
else:
return np.nan
feat['road_item_image_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_eulasim'] ]
return feat
def feat_road_item_image_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
        item_image[k] = v[1]  # image embedding is the second element, as in the other image-similarity features
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image, ord=1 )
return a
else:
return np.nan
feat['road_item_image_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_mansim'] ]
return feat
def feat_i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
i2i_sim_seq = {}
st0 = time.time()
tot = 0
for user, items in user_item_dict.items():
times = user_time_dict[user]
if tot % 500 == 0:
print( f'tot: {len(user_item_dict)}, now: {tot}' )
tot += 1
for loc1, item in enumerate(items):
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
if (item,relate_item) not in new_keys:
continue
t1 = times[loc1]
t2 = times[loc2]
i2i_sim_seq.setdefault((item,relate_item), [])
i2i_sim_seq[ (item,relate_item) ].append( (loc1, loc2, t1, t2, len(items) ) )
st1 = time.time()
print(st1-st0)
return i2i_sim_seq
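# Shape of the cached structure (item ids below are hypothetical): i2i_sim_seq maps an
# ordered item pair to every co-occurrence record of that pair across user sessions,
#   i2i_sim_seq[(1001, 2002)] == [(loc1, loc2, t1, t2, session_len), ...]
# The feat_i2i_* functions below only re-weight these records, so this expensive pass over
# user_item_dict is done once and reloaded from feat_i2i_seq_{mode}_{cur_stage}.pkl.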
def feat_i2i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:50]:
res[i[0]]=i[1]
sim_item_p2[key] = res
i2i2i_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if item2 == item1:
continue
for item3 in sim_item_p2[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2i_sim_seq.setdefault((item1,item3), [])
i2i2i_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], sim_item_p2[item2][item3],
sim_item_p1[item1][item2], sim_item_p1[item2][item3] ) )
return i2i2i_sim_seq
def feat_i2i2b_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:100]:
res[i[0]]=i[1]
sim_item_p2[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:100]:
blend_score[i][j] = cij
i2i2b_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if (item2 == item1) or (item2 not in blend_score.keys()):
continue
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2b_sim_seq.setdefault((item1,item3), [])
i2i2b_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], blend_score[item2][item3],
sim_item_p1[item1][item2], blend_score[item2][item3] ) )
return i2i2b_sim_seq
def feat_i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
for key in new_keys:
if np.isnan( result[key] ):
continue
result[key] = result[key] / ((item_cnt[key[0]] * item_cnt[key[1]]) ** 0.2)
print('Finished getting result')
feat['i2i_sim'] = feat['new_keys'].map(result)
#import pdb
#pdb.set_trace()
#i2i_seq_feat = pd.concat( [feat,i2i_seq_feat], axis=1 )
#i2i_seq_feat['itemAB'] = i2i_seq_feat['road_item'].astype('str') + '-' + i2i_seq_feat['item'].astype('str')
feat = feat[ ['i2i_sim'] ]
return feat
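# Worked example of the weighting applied above (numbers are made up): a pair clicked at
# positions loc1=5, loc2=2 and times t1=0.004, t2=0.001 in a session of 10 items.
def _demo_i2i_pair_weight():
    loc1, loc2, t1, t2, record_len = 5, 2, 0.004, 0.001, 10
    time_weight = 1 - (t1 - t2) * 100        # 1 - 0.3 = 0.7
    loc_weight = 0.9 ** (loc1 - loc2 - 1)    # 0.9**2 = 0.81
    # both weights are floored at 0.2 in the real code; the floor is not hit here
    return loc_weight * time_weight / math.log(1 + record_len)   # ~0.24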
def feat_i2i_sim_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += loc_weight
feat['i2i_sim_abs_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_abs_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
loc_diff = loc1-loc2
loc_weight = (loc_base**loc_diff)
if abs(loc_weight) <= 0.2:
if loc_weight > 0:
loc_weight = 0.2
else:
loc_weight = -0.2
result[key] += loc_weight
feat['i2i_sim_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_abs_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
result[key] += time_weight
feat['i2i_sim_abs_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_sim_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - (t1 - t2) * 100)
if abs(time_weight)<=0.2:
if time_weight > 0:
time_weight = 0.2
else:
time_weight = -0.2
result[key] += time_weight
feat['i2i_sim_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - abs(t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = abs(loc2-loc1)
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
if loc1-loc2>0:
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
else:
result[key] -= 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_mean_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += ( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len) ) / len(records)
feat['i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_sum_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] + item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_sum_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_sum_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_multi_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
print('Creat new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] * item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_multi_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_multi_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_b2b_sim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
blend_sim = utils.load_sim(item_blend_sim_path)
b2b_sim = {}
for item in blend_sim:
i = item[0]
b2b_sim.setdefault(i,{})
for j,cij in item[1][:100]:
b2b_sim[i][j] = cij
vals = feat[ ['road_item','item'] ].values
result = []
for val in vals:
item1 = val[0]
item2 = val[1]
if item1 in b2b_sim.keys():
if item2 in b2b_sim[item1].keys():
result.append( b2b_sim[ item1 ][ item2 ] )
else:
result.append( np.nan )
else:
result.append( np.nan )
feat['b2b_sim'] = result
feat = feat[ ['b2b_sim'] ]
return feat
def feat_itemqa_loc_diff(data):
df = data.copy()
feat = df[ ['index','query_item_loc','road_item_loc'] ]
feat['itemqa_loc_diff'] = feat['road_item_loc'] - feat['query_item_loc']
def func(s):
if s<0:
return -s
return s
feat['abs_itemqa_loc_diff'] = feat['itemqa_loc_diff'].apply(func)
feat = feat[ ['itemqa_loc_diff','abs_itemqa_loc_diff'] ]
return feat
def feat_sim_three_weight(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean'] = feat['com_item_loc_weights_sum'] / feat['com_item_cnt']
feat['com_item_time_weights_mean'] = feat['com_item_time_weights_sum'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum','com_item_time_weights_sum','com_item_record_weights_sum',
'com_item_loc_weights_mean','com_item_time_weights_mean','com_item_record_weights_mean' ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
feat['user_item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
for col in ['i2i_score','blend_score','i2i2i_score']:
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user_item',col+'_sum'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user_item',col+'_mean'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_different_type_road_score_sum_mean_new(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
recall_source_names = ['i2i_w02','b2b','i2i2i','i2i_w10','i2i2b']
recall_source_names = [ i+'_score' for i in recall_source_names ]
for idx,col in enumerate(recall_source_names):
feat[col] = feat['sim_weight']
feat.loc[ feat['recall_type']!=idx, col ] = np.nan
for col in recall_source_names:
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user','item',col+'_sum'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user','item',col+'_mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat_list = recall_source_names + [ col+'_sum' for col in recall_source_names ] + [ col+'_mean' for col in recall_source_names ]
feat = feat[ feat_list ]
return feat
def feat_sim_base(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
road_item_cnt = np.zeros( num, dtype=float )
t_item_cnt = np.zeros( num, dtype=float )
com_item_cij = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
road_item_cnt[i] = item_cnt[ road_item[i] ]
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_cij[i] = sim_item[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
road_item_cnt[i] = np.nan
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
if t_item[i] in item_set:
t_item_cnt[i] = item_cnt[ t_item[i] ]
else:
t_item_cnt[i] = np.nan
feat['road_item_cnt'] = road_item_cnt
feat['item_cnt'] = t_item_cnt
feat['com_item_cij'] = com_item_cij
feat['com_item_cnt'] = t_com_item_cnt
feat = feat[ ['road_item_cnt','item_cnt','com_item_cij','com_item_cnt' ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_u2i_abs_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if loc_weight<=0.1:
loc_weight = 0.1
result.append(loc_weight)
feat['u2i_abs_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if abs(loc_weight)<=0.1:
loc_weight = 0.1
if loc2 < loc1:
loc_weight = -loc_weight
result.append(loc_weight)
feat['u2i_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_abs_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if time_weight<=0.1:
time_weight = 0.1
result.append(time_weight)
feat['u2i_abs_time_weights'] = result
cols = [ 'u2i_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_u2i_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if abs(time_weight)<=0.1:
time_weight = 0.1
if t1 > t2:
time_weight = -time_weight
result.append(time_weight)
feat['u2i_time_weights'] = result
cols = [ 'u2i_time_weights' ]
feat = feat[ cols ]
return feat
def feat_automl_cate_count(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'road_item','item','road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
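# Minimal sketch of the count-encoding used above (toy values, illustrative only):
def _demo_count_encoding():
    toy = pd.DataFrame({'item': [10, 10, 20]})
    toy['item_count'] = toy['item'].map(toy['item'].value_counts())   # -> 2, 2, 1
    return toy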
def feat_automl_user_cate_count(data):
df = data.copy()
feat = df[ ['index','user','road_item','item'] ]
feat['user-road_item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str')
feat['user-item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
feat['user-road_item-item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'user-road_item','user-item','user-road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_u2i_road_item_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time']]
feat = feat.groupby(['user','road_item_loc']).first().reset_index()
feat_group = feat.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_group['road_item_time'].diff(1)
feat2 = feat_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_time_diff_history'
feat2.name = 'u2i_road_item_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2],axis=1),how='left',on=['user','road_item_loc'])
cols = [ 'u2i_road_item_time_diff_history', 'u2i_road_item_time_diff_future' ]
feat = feat[ cols ]
return feat
def feat_road_item_text_dot(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
return c
else:
return np.nan
feat['road_item_text_dot'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_dot'] ]
return feat
def feat_road_item_text_norm2(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func1(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return a*b
else:
return np.nan
def func2(ss):
item1 = ss
if ( item1 in item_text ):
item1_text = item_text[item1]
a = np.linalg.norm( item1_text )
return a
else:
return np.nan
feat['road_item_text_product_norm2'] = df[ ['road_item','item'] ].apply(func1, axis=1)
feat['road_item_text_norm2'] = df['road_item'].apply(func2)
feat['item_text_norm2'] = df['item'].apply(func2)
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
def feat_automl_cate_count_all_1(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
feat[cate1+'_count_'] = feat[cate1].map( feat[cate1].value_counts() )
cols.append( cate1+'_count_' )
        print(f'feat {cate1} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_2(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
name2 = f'{cate1}_{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count_' )
            print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_3(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
for c in range(b+1,n):
cate3 = categories[c]
name3 = f'{cate1}_{cate2}_{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count_' )
                print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_time_window_cate_count(data):
if mode=='valid':
all_train_data = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
all_train_data = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_with_time = all_train_data[["item_id", "time"]].sort_values(["item_id", "time"])
item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
utils.dump_pickle(item2time, item2time_path.format(mode))
item2times = utils.load_pickle(item2time_path.format(mode))
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
]]
def feat_time_window_cate_count(data):
    # Run item2time.py once before building this feature
try:
item2times = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("做这个特征之前,先做一次item2time.py")
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
feat["item_cnt_around_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.07))
feat["item_cnt_before_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.07))
feat["item_cnt_after_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.07))
feat["item_cnt_around_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.1))
feat["item_cnt_before_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.1))
feat["item_cnt_after_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.1))
feat["item_cnt_around_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.15))
feat["item_cnt_before_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.15))
feat["item_cnt_after_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.15))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
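# Hedged sketch (an assumption, not the original item2time.py script): the two
# functions above expect item2time_path to hold a pickled dict mapping
# item_id -> sorted list of click timestamps. Building it would look roughly
# like this, reusing the paths and column names that appear elsewhere in this file.
def _build_item2time_sketch():
    if mode == 'valid':
        df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
    else:
        df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
    # sort so each item's click times come out in ascending order
    item_with_time = df_train[["item_id", "time"]].sort_values(["item_id", "time"])
    item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
    utils.dump_pickle(item2time, item2time_path.format(mode, cur_stage))
    return item2time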
# Within the recall set, count how many times this item was recalled in a time window around qtime
# item2times is rebuilt from the recall data; the rest of the logic is unchanged
def item_recall_cnt_around_qtime(data):
item2times = data.groupby("item")["time"].agg(list).to_dict()
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
new_cols = []
new_col_name = "item_recall_cnt_{}_time_{}"
for delta in [0.01, 0.02, 0.05, 0.07, 0.1, 0.15]:
print('running delta: ', delta)
for mode in ["all", "left", "right"]:
new_col = new_col_name.format(mode, delta)
new_cols.append(new_col)
feat[new_col] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode=mode, delta=delta))
return feat[new_cols]
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item','road_item-item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
                print(f'feat {cate1} {cate2} {cate3} done')
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_topk_by_loc = {}
result_history_loc_diff1_cnt = {}
result_future_loc_diff1_cnt = {}
result_history_loc_diff1_time_mean = {}
result_future_loc_diff1_time_mean = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
result_history_loc_diff1_cnt[key] = 0.0
result_future_loc_diff1_cnt[key] = 0.0
result_history_loc_diff1_time_mean[key] = 0
result_future_loc_diff1_time_mean[key] = 0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result_history_loc_diff1_cnt[key] += 1
result_history_loc_diff1_time_mean[key] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result_future_loc_diff1_cnt[key] += 1
result_future_loc_diff1_time_mean[key] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)))
result_history_loc_diff1_time_mean[key] /=(result_history_loc_diff1_cnt[key]+1e-5)
result_future_loc_diff1_time_mean[key] /=(result_future_loc_diff1_cnt[key]+1e-5)
result_one = sorted(result[key],key=lambda x:x[0])
result_one_len = len(result_one)
result_topk_by_loc[key] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
feat['history_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_history_loc_diff1_time_mean).fillna(0)
feat['future_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_future_loc_diff1_time_mean).fillna(0)
feat['history_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_history_loc_diff1_cnt).fillna(0)
feat['future_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_future_loc_diff1_cnt).fillna(0)
feat_top = []
for key,value in result_topk_by_loc.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_median = {}
result_mean = {}
result_topk = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result[key],reverse=True)
result_one_len = len(result_one)
result_median[key] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result_mean[key] = sum(result[key])/len(result[key])
result_topk[key] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
feat['i2i_cijs_median'] = feat['new_keys'].map(result_median)
feat['i2i_cijs_mean'] = feat['new_keys'].map(result_mean)
feat_top = []
for key,value in result_topk.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_different_type_road_score_sum_mean_by_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#,'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].sum().reset_index()
df[col+'_by_item_sum'] = df[col]
df = df[ ['item',col+'_by_item_sum'] ]
feat = pd.merge( feat, df, on='item', how='left')
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].mean().reset_index()
df[col+'_by_item_mean'] = df[col]
df = df[ ['item',col+'_by_item_mean'] ]
feat = pd.merge( feat, df, on='item', how='left')
feat = feat[[f'{i}_by_item_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_different_type_road_score_mean_by_road_item(data):
df = data.copy()
feat = df[ ['user','road_item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['road_item',col,'index'] ]
df = df.groupby('road_item')[col].mean().reset_index()
df[col+'_by_road_item_mean'] = df[col]
df = df[ ['road_item',col+'_by_road_item_mean'] ]
feat = pd.merge( feat, df, on='road_item', how='left')
feat = feat[[f'{i}_by_road_item_mean' for i in cols]]
return feat
def feat_different_type_road_score_mean_by_loc_diff(data):
df = data.copy()
feat = df[ ['user','index','sim_weight','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['loc_diff',col,'index'] ]
df = df.groupby('loc_diff')[col].mean().reset_index()
df[col+'_by_loc_diff_mean'] = df[col]
df = df[ ['loc_diff',col+'_by_loc_diff_mean'] ]
feat = pd.merge( feat, df, on='loc_diff', how='left')
feat = feat[[f'{i}_by_loc_diff_mean' for i in cols]]
return feat
def feat_different_type_road_score_sum_mean_by_recall_type_and_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].sum().reset_index()
df[col+'_by_item-recall_type_sum'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_sum'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].mean().reset_index()
df[col+'_by_item-recall_type_mean'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_mean'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
feat = feat[[f'{i}_by_item-recall_type_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_base_info_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
#all_train_stage_data = pd.concat( all_train_stage_data.iloc[0:1000], all_train_stage_data.iloc[-10000:] )
df_train_stage = all_train_stage_data
df = data.copy()
feat = df[ ['index','road_item','item','stage'] ]
stage2sim_item = {}
stage2item_cnt = {}
stage2com_item_cnt = {}
for sta in range(cur_stage+1):
df_train = df_train_stage[ df_train_stage['stage']==sta ]
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
stage2sim_item[sta] = sim_item
stage2item_cnt[sta] = item_cnt
stage2com_item_cnt[sta] = com_item_cnt
sta_list = []
itemb_list = []
sum_sim_list = []
count_sim_list = []
mean_sim_list = []
nunique_itema_count_list = []
for sta in range(cur_stage+1):
for key1 in stage2sim_item[sta].keys():
val = 0
count = 0
for key2 in stage2sim_item[sta][key1].keys():
val += stage2sim_item[sta][key1][key2]
count += stage2com_item_cnt[sta][key1][key2]
sta_list.append( sta )
itemb_list.append( key1 )
sum_sim_list.append( val )
count_sim_list.append( count )
mean_sim_list.append( val/count )
nunique_itema_count_list.append( len( stage2sim_item[sta][key1].keys() ) )
data1 = pd.DataFrame( {'stage':sta_list, 'item':itemb_list, 'sum_sim_in_stage':sum_sim_list, 'count_sim_in_stage':count_sim_list,
'mean_sim_in_stage':mean_sim_list, 'nunique_itema_count_in_stage':nunique_itema_count_list } )
'''
sta_list = []
item_list = []
cnt_list = []
for sta in range(cur_stage+1):
for key1 in stage2item_cnt[sta].keys():
sta_list.append(sta)
item_list.append(key1)
cnt_list.append( stage2item_cnt[sta][key1] )
data2 = pd.DataFrame( {'stage':sta_list, 'road_item':item_list, 'stage_road_item_cnt':cnt_list } )
data3 = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'stage_item_cnt':cnt_list } )
'''
#feat = pd.merge( feat,data1, how='left',on=['stage','road_item','item'] )
#feat = pd.merge( feat,data2, how='left',on=['stage','road_item'] )
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat = feat[ ['sum_sim_in_stage','count_sim_in_stage','mean_sim_in_stage','nunique_itema_count_in_stage'] ]
return feat
def feat_item_time_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','stage','time'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','item_id'] )['time'].agg( ['max','min','mean'] ).reset_index()
data1.columns = [ 'stage','item','time_max_in_stage','time_min_in_stage','time_mean_in_stage' ]
data1['time_dura_in_stage'] = data1['time_max_in_stage'] - data1['time_min_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat['time_diff_min_in_stage'] = feat['time'] - feat['time_min_in_stage']
feat['time_diff_max_in_stage'] = feat['time_max_in_stage'] - feat['time']
cols = [ 'time_dura_in_stage','time_max_in_stage','time_min_in_stage','time_mean_in_stage','time_diff_min_in_stage','time_diff_max_in_stage' ]
feat = feat[ cols ]
return feat
def feat_user_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data1.name = 'user_count_in_stage'
data1 = data1.reset_index()
data1 = data1.rename( columns={'user_id':'user'} )
data2 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].nunique()
data2.name = 'item_nunique_in_stage'
data2 = data2.reset_index()
data2 = data2.rename( columns={'item_id':'item'} )
data3 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].count()
data3.name = 'item_count_in_stage'
data3 = data3.reset_index()
data3 = data3.rename( columns={'item_id':'item'} )
data3[ 'item_ratio_in_stage' ] = data3[ 'item_count_in_stage' ] / data2['item_nunique_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','user'] )
feat = pd.merge( feat,data2, how='left',on=['stage','item'] )
feat = pd.merge( feat,data3, how='left',on=['stage','item'] )
cols = [ 'user_count_in_stage','item_nunique_in_stage','item_ratio_in_stage' ]
feat = feat[ cols ]
return feat
def feat_item_com_cnt_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["road_item", "stage"]]
feat["head"] = feat.set_index(["road_item", "stage"]).index
feat["itema_cnt_in_stage"] = feat["head"].map(item_stage_cnt)
return feat[["itema_cnt_in_stage"]]
def item_cnt_in_stage2(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
return feat[["item_stage_cnt"]]
def feat_item_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["item"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
item_stage_cnt = train_stage_data.groupby(['item_id'])['index'].count()
item_stage_cnt.name = f"item_stage_cnt_{sta}"
item_stage_cnt = item_stage_cnt.reset_index()
item_stage_cnt.columns = ['item',f"item_stage_cnt_{sta}"]
feat = pd.merge( feat,item_stage_cnt,how='left',on='item' )
cols.append( f"item_stage_cnt_{sta}" )
#import pdb
#pdb.set_trace()
return feat[ cols ]
def feat_user_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["user"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
user_stage_cnt = train_stage_data.groupby(['user_id'])['index'].count()
user_stage_cnt.name = f"user_stage_cnt_{sta}"
user_stage_cnt = user_stage_cnt.reset_index()
user_stage_cnt.columns = ['user',f"user_stage_cnt_{sta}"]
feat = pd.merge( feat,user_stage_cnt,how='left',on='user' )
cols.append( f"user_stage_cnt_{sta}" )
#import pdb
#pdb.set_trace()
return feat[ cols ]
def feat_user_and_item_count_in_three_init_data(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
df_train_stage = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
df_train_stage = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
data1 = df_train_stage.groupby( ['stage','item_id'] )['index'].count()
data1.name = 'in_stage_item_count'
data1 = data1.reset_index()
data1 = data1.rename( columns = {'item_id':'item'} )
data2 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data2.name = 'in_stage_user_count'
data2 = data2.reset_index()
data2 = data2.rename( columns = {'user_id':'user'} )
data3 = df_train_stage.groupby( ['item_id'] )['index'].count()
data3.name = 'no_in_stage_item_count'
data3 = data3.reset_index()
data3 = data3.rename( columns = {'item_id':'item'} )
data4 = df_train_stage.groupby( ['user_id'] )['index'].count()
data4.name = 'no_in_stage_user_count'
data4 = data4.reset_index()
data4 = data4.rename( columns = {'user_id':'user'} )
data5 = df_train.groupby( ['item_id'] )['index'].count()
data5.name = 'no_stage_item_count'
data5 = data5.reset_index()
data5 = data5.rename( columns = {'item_id':'item'} )
data6 = df_train.groupby( ['user_id'] )['index'].count()
data6.name = 'no_stage_user_count'
data6 = data6.reset_index()
data6 = data6.rename( columns = {'user_id':'user'} )
feat = pd.merge( feat,data1,how='left',on=['stage','item'] )
feat = pd.merge( feat,data2,how='left',on=['stage','user'] )
feat = pd.merge( feat,data3,how='left',on=['item'] )
feat = pd.merge( feat,data4,how='left',on=['user'] )
feat = pd.merge( feat,data5,how='left',on=['item'] )
feat = pd.merge( feat,data6,how='left',on=['user'] )
cols = [ 'in_stage_item_count','in_stage_user_count','no_in_stage_item_count','no_in_stage_user_count','no_stage_item_count','no_stage_user_count' ]
return feat[ cols ]
#def feat_item_count_in_three_init_data(data):
def feat_i2i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2i_sim_seq')
i2i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2i_sim_seq.keys():
continue
records = i2i2i_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
    # normalise each key's accumulated sums by its own record count to get means
    result[:,1] /= (result[:,0]+1e-9)
    result[:,2] /= (result[:,0]+1e-9)
    result[:,3] /= (result[:,0]+1e-9)
print('Finished getting result')
cols = ['i2i2i_road_cnt','i2i2i_score1_mean','i2i2i_score2_mean','i2i2i_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_i2i2b_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2b_sim_seq')
i2i2b_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2b_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2b_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2b_sim_seq.keys():
continue
records = i2i2b_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
    # normalise each key's accumulated sums by its own record count to get means
    result[:,1] /= (result[:,0]+1e-9)
    result[:,2] /= (result[:,0]+1e-9)
    result[:,3] /= (result[:,0]+1e-9)
print('Finished getting result')
cols = ['i2i2b_road_cnt','i2i2b_score1_mean','i2i2b_score2_mean','i2i2b_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_numerical_groupby_item_cnt_in_stage(data):
df = data.copy()
num_cols = [ 'sim_weight', 'loc_weight', 'time_weight', 'rank_weight' ]
cate_col = 'item_stage_cnt'
feat = df[ ['index','road_item','item'] ]
feat1 = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'item_cnt_in_stage2_{mode}_{cur_stage}.pkl') )
df[ cate_col ] = feat1[ cate_col ]
feat[ cate_col ] = feat1[ cate_col ]
cols = []
for col in num_cols:
t = df.groupby(cate_col)[col].agg( ['mean','max','min'] )
cols += [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t.columns = [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t = t.reset_index()
feat = pd.merge( feat, t, how='left', on=cate_col )
return feat[ cols ]
#i2i_score,
#
def feat_item_stage_nunique(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_nunique = all_train_stage_data.groupby(["item_id"])["stage"].nunique()
feat = data[["item"]]
feat["item_stage_nunique"] = feat["item"].map(item_stage_nunique)
return feat[["item_stage_nunique"]]
def feat_item_qtime_time_diff(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result_history = np.zeros(df_v.shape[0])*np.nan
result_future = np.zeros(df_v.shape[0])*np.nan
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = [0]+item_time_list[df_v[i,0]]+[1]
for j in range(1,len(time_list)):
if time<time_list[j]:
result_future[i] = time_list[j]-time
result_history[i] = time-time_list[j-1]
break
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = item_time_list[df_v[i,0]]+[1]
for j in range(len(time_list)):
if time<time_list[j]:
result[i] = j
break
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
def feat_road_time_bins_cate_cnt(data):
df = data.copy()
categoricals = ['item','road_item','user','recall_type']
feat = df[['road_item_time']+categoricals]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categoricals.append('loc_diff')
feat['road_time_bins'] = pd.Categorical(pd.cut(feat['road_item_time'],100)).codes
cols = []
for cate in categoricals:
cnt = feat.groupby([cate,'road_time_bins']).size()
cnt.name = f'{cate}_cnt_by_road_time_bins'
cols.append(cnt.name)
feat = feat.merge(cnt,how='left',on=[cate,'road_time_bins'])
return feat[cols]
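# Minimal sketch of the binning trick used above: pd.cut splits the numeric time
# column into equal-width intervals and Categorical(...).codes maps each interval
# to an integer bin id that can be used as a groupby key. The series is hypothetical.
def _demo_road_time_bins():
    s = pd.Series([0.01, 0.05, 0.5, 0.51, 0.99])
    return pd.Categorical(pd.cut(s, 4)).codes  # -> [0, 0, 1, 2, 3]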
def feat_time_window_cate_count(data):
    # Run item2time.py once before building this feature
import time as ti
t = ti.time()
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
del df
try:
item_time_list = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("做这个特征之前,先做一次item2time.py")
delta_list = np.array(sorted([0.01, 0.02, 0.05, 0.07, 0.1, 0.15]))
delta_list2 = delta_list[::-1]
delta_n = delta_list.shape[0]
n = delta_n*2+1
result_tmp = np.zeros((df_v.shape[0],n))
result_equal = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = np.ones(n)*df_v[i,1]
time[:delta_n] -= delta_list2
time[-delta_n:] += delta_list
time_list = item_time_list[df_v[i,0]]+[10]
k = 0
for j in range(len(time_list)):
while k<n and time[k]<time_list[j] :
result_tmp[i,k] = j
k += 1
if time[delta_n]==time_list[j]:
result_equal[i] += 1
result_tmp[i,k:] = j
if i%100000 == 0:
print(f'[{i}/{df_v.shape[0]}]:time {ti.time()-t:.3f}s')
t = ti.time()
result = np.zeros((df_v.shape[0],delta_n*3))
for i in range(delta_n):
result[:,i*3+0] = result_tmp[:,delta_n] - result_tmp[:,i]
result[:,i*3+1] = result_tmp[:,-(i+1)] - result_tmp[:,delta_n] + result_equal
result[:,i*3+2] = result_tmp[:,-(i+1)] - result_tmp[:,i]
cols = [f'item_cnt_{j}_time_{i}' for i in delta_list2 for j in ['before','after','around']]
result = | pd.DataFrame(result,columns=cols) | pandas.DataFrame |
import numpy as np
import pandas as pd
def load_and_preprocess_data():
""" Load data from total_diary_migrain.csv file
and return the data matrix and labels
Returns
-------
features - np.ndarray
Migraine data matrix
features_list - list
List of Features in our migraine data matrix
labels - np.ndarray
Truth labels
"""
data = pd.read_csv('total_diary_migraine.csv', header=0)
features = | pd.DataFrame(data) | pandas.DataFrame |
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_frame_equal
from windpowerlib.power_curves import (smooth_power_curve,
wake_losses_to_power_curve)
import windpowerlib.wind_turbine as wt
class TestPowerCurves:
@classmethod
def setup_class(self):
self.test_turbine = {'hub_height': 100,
'turbine_type': 'E-126/4200'}
def test_smooth_power_curve(self):
test_curve = wt.WindTurbine(**self.test_turbine).power_curve
parameters = {'power_curve_wind_speeds': test_curve['wind_speed'],
'power_curve_values': test_curve['value'],
'standard_deviation_method': 'turbulence_intensity'}
# Raise ValueError - `turbulence_intensity` missing
with pytest.raises(ValueError):
parameters['standard_deviation_method'] = 'turbulence_intensity'
smooth_power_curve(**parameters)
# Test turbulence_intensity method
parameters['turbulence_intensity'] = 0.5
wind_speed_values_exp = pd.Series([6.0, 7.0, 8.0, 9.0, 10.0],
name='wind_speed')
power_values_exp = pd.Series([
1141906.9806766496, 1577536.8085282773, 1975480.993355767,
2314059.4022704284, 2590216.6802602503], name='value')
smoothed_curve_exp = pd.DataFrame(data=pd.concat([
wind_speed_values_exp, power_values_exp], axis=1))
smoothed_curve_exp.index = np.arange(5, 10, 1)
assert_frame_equal(smooth_power_curve(**parameters)[5:10],
smoothed_curve_exp)
# Test Staffel_Pfenninger method
parameters['standard_deviation_method'] = 'Staffell_Pfenninger'
power_values_exp = pd.Series([
929405.1348918702, 1395532.5468724659, 1904826.6851982325,
2402659.118305521, 2844527.1732449625], name='value')
smoothed_curve_exp = pd.DataFrame(
data= | pd.concat([wind_speed_values_exp, power_values_exp], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import copy
import numpy as np
import pandas as pd
from datamanage.lite.tag import tagaction
from datamanage.pro.datamap import dmaction
from datamanage.pro.datamap.serializers import BasicListSerializer, DataValueSerializer
from datamanage.pro.datastocktake.dataset_process import dataset_filter
from datamanage.pro.datastocktake.metrics.score_level import level_distribution
from datamanage.pro.datastocktake.metrics.storage import storage_capacity_trend
from datamanage.pro.datastocktake.metrics.trend import score_trend_pandas_groupby
from datamanage.pro.datastocktake.sankey_diagram import (
fetch_value_between_source_target,
format_sankey_diagram,
minimal_value,
)
from datamanage.pro.datastocktake.settings import SCORE_DICT
from datamanage.pro.lifecycle.metrics.cost import hum_storage_unit
from datamanage.pro.lifecycle.metrics.ranking import score_aggregate
from datamanage.pro.utils.time import get_date
from datamanage.utils.api.dataquery import DataqueryApi
from django.conf import settings
from django.core.cache import cache
from django.db import connections
from django.utils.translation import ugettext as _
from pandas import DataFrame
from rest_framework.response import Response
from common.bklanguage import bktranslates
from common.decorators import list_route, params_valid
from common.views import APIViewSet
RUN_MODE = getattr(settings, "RUN_MODE", "DEVELOP")
METRIC_DICT = {
"active": "project_id",
"app_important_level_name": "bk_biz_id",
"is_bip": "bk_biz_id",
}
ABN_BIP_GRADE_NAME_LIST = ["确认自研信用", "退市", "已下架"]
CORR_BIP_GRADE_NAME = _("其他")
OPER_STATE_NAME_ORDER = [
"前期接触",
"接入评估",
"接入准备",
"接入中",
"技术封测",
"测试阶段",
"封测",
"内测",
"不删档",
"公测",
"正式运行",
"停运",
"取消",
"退市",
"其他",
]
BIP_GRADE_NAME_ORDER = [
"其他",
"暂无评级",
"长尾",
"三星",
"预备四星",
"四星",
"限制性预备五星",
"预备五星",
"五星",
"预备六星",
"六星",
]
class QueryView(APIViewSet):
@list_route(methods=["get"], url_path="popular_query")
def popular_query(self, request):
"""
        @api {get} /datamanage/datastocktake/query/popular_query/ Get the most popular recently queried tables
@apiVersion 0.1.0
@apiGroup Query
@apiName popular_query
        @apiParam {top} int number of top recently-popular queried tables to return
@apiSuccessExample Success-Response:
{
"errors":null,
"message":"ok",
"code":"1500200",
"data":[
{
"count":20000,
"result_table_name_alias":"xx",
"app_code":"xx",
"result_table_id":"xx"
}
],
"result":true
}
"""
        # Use the date of the day before yesterday, format '20200115'; with yesterday's date the popular-query data could still be missing before 1am if the offline job has not finished
yesterday = get_date()
top = int(request.query_params.get("top", 10))
prefer_storage = "tspider"
sql_latest = """SELECT count, result_table_id, result_table_name_alias, app_code
FROM 591_dataquery_processing_type_rt_alias_one_day
WHERE thedate={} and result_table_id is not null and app_code is not null
order by count desc limit {}""".format(
yesterday, top
)
ret_latest = DataqueryApi.query({"sql": sql_latest, "prefer_storage": prefer_storage}).data
if ret_latest:
ret_latest = ret_latest.get("list", [])
            app_code_dict = cache.get("app_code_dict") or {}
for each in ret_latest:
if "app_code" in each:
each["app_code_alias"] = (
app_code_dict[each["app_code"]] if app_code_dict.get(each["app_code"]) else each["app_code"]
)
else:
return Response([])
        # Note: the Chinese display name of app_code in ret_latest is still missing here
return Response(ret_latest)
class DataSourceDistributionView(APIViewSet):
@list_route(methods=["post"], url_path="distribution")
@params_valid(serializer=BasicListSerializer)
def data_source_distribution(self, request, params):
"""
        @api {post} /datamanage/datastocktake/data_source/distribution/ Get the distribution of data sources
@apiVersion 0.1.0
@apiGroup DataSourceDistribution
@apiName data_source_distribution
        @apiParamExample {json} Example parameters:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"tag_count_list": [
6,
278
],
"tag_code_list": [
"xx",
"other"
],
"tag_alias_list": [
"XX",
"其他"
]
},
"result": true
}
"""
params.pop("has_standard", None)
result_dict = dmaction.floating_window_query_dgraph(
params,
connections["bkdata_basic_slave"],
dmaction.NEED_DATA_SET_ID_DETAIL,
data_source_distribute=True,
).get("dgraph_result")
        # Counts for each data-source tag
tag_code_list = ["components", "sys_host", "system"]
tag_alias_list = [_("组件"), _("设备"), _("系统")]
tag_count_list = [
result_dict.get("c_%s" % each_tag_code)[0].get("count", 0)
for each_tag_code in tag_code_list
if result_dict.get("c_%s" % each_tag_code)
]
        # Total count matching the query conditions
rt_count = result_dict.get("rt_count")[0].get("count", 0) if result_dict.get("rt_count") else 0
rd_count = result_dict.get("rd_count")[0].get("count", 0) if result_dict.get("rd_count") else 0
tdw_count = result_dict.get("tdw_count")[0].get("count", 0) if result_dict.get("tdw_count") else 0
total_count = rt_count + rd_count + tdw_count
        # Count for the "other" bucket
other_count = total_count
for each_tag_count in tag_count_list:
other_count -= each_tag_count
tag_count_list.append(other_count)
tag_code_list.append("other")
tag_alias_list.append(_("其他"))
return Response(
{
"tag_count_list": tag_count_list,
"tag_alias_list": tag_alias_list,
"tag_code_list": tag_code_list,
}
)
@list_route(methods=["post"], url_path="detail_distribution")
@params_valid(serializer=BasicListSerializer)
def data_source_detail_distribution(self, request, params):
"""
        @api {post} /datamanage/datastocktake/data_source/detail_distribution/ Get the detailed distribution of data sources
@apiVersion 0.1.0
@apiGroup DataSourceDistribution
@apiName data_source_detail_distribution
        @apiParamExample {json} Example parameters:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming",
"top":5,
"parent_tag_code":"components"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"tag_count_list": [
6,
278
],
"tag_code_list": [
"xx",
"other"
],
"tag_alias_list": [
"XX",
"其他"
]
},
"result": true
}
"""
params.pop("has_standard", None)
top = params.get("top", 5)
parent_code = params.get("parent_tag_code", "all")
        # 1. Query results for all data-source tags
result_dict = dmaction.floating_window_query_dgraph(
params,
connections["bkdata_basic_slave"],
dmaction.NEED_DATA_SET_ID_DETAIL,
data_source_detail_distribute=True,
)
dgraph_result = result_dict.get("dgraph_result")
data_source_tag_list = result_dict.get("data_source_tag_list")
for each_tag in data_source_tag_list:
each_tag["count"] = (
dgraph_result.get("c_%s" % each_tag.get("tag_code"))[0].get("count", 0)
if dgraph_result.get("c_%s" % each_tag.get("tag_code"))
else 0
)
        # Sort all second-level data-source tags by count
data_source_tag_list.sort(key=lambda k: (k.get("count", 0)), reverse=True)
if parent_code == "all":
top_tag_list = data_source_tag_list[:top]
            # Total count matching the query conditions
rt_count = dgraph_result.get("rt_count")[0].get("count", 0) if dgraph_result.get("rt_count") else 0
rd_count = dgraph_result.get("rd_count")[0].get("count", 0) if dgraph_result.get("rd_count") else 0
tdw_count = dgraph_result.get("tdw_count")[0].get("count", 0) if dgraph_result.get("tdw_count") else 0
total_count = rt_count + rd_count + tdw_count
else:
total_count = data_source_tag_list[0].get("count")
top_tag_list = data_source_tag_list[1 : (top + 1)]
other_count = total_count
for each_tag_count in top_tag_list:
other_count -= each_tag_count.get("count")
        # Counts for each data-source tag
tag_code_list = [each_tag.get("tag_code") for each_tag in top_tag_list]
tag_alias_list = [each_tag.get("tag_alias") for each_tag in top_tag_list]
tag_count_list = [each_tag.get("count") for each_tag in top_tag_list]
if other_count > 0:
tag_count_list.append(other_count)
tag_code_list.append("other")
tag_alias_list.append(_("其他"))
return Response(
{
"tag_count_list": tag_count_list,
"tag_alias_list": tag_alias_list,
"tag_code_list": tag_code_list,
}
)
class DataTypeDistributionView(APIViewSet):
@list_route(methods=["post"], url_path="distribution")
@params_valid(serializer=BasicListSerializer)
def data_type_distribution(self, request, params):
"""
        @api {post} /datamanage/datastocktake/data_type/distribution/ Get the distribution of data types
@apiVersion 0.1.0
@apiGroup DataTypeDistribution
@apiName data_type_distribution
        @apiParamExample {json} Example parameters:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"tag_count_list": [
6,
278
],
"tag_code_list": [
"xx",
"other"
],
"tag_alias_list": [
"XX",
"其他"
]
},
"result": true
}
"""
params.pop("has_standard", None)
        # 1. Query results for all data-source tags
result_dict = dmaction.floating_window_query_dgraph(
params,
connections["bkdata_basic_slave"],
dmaction.NEED_DATA_SET_ID_DETAIL,
data_type_distribute=True,
)
dgraph_result = result_dict.get("dgraph_result")
data_type_tag_list = result_dict.get("data_source_tag_list")
for each_tag in data_type_tag_list:
each_tag["count"] = (
dgraph_result.get("c_%s" % each_tag.get("tag_code"))[0].get("count", 0)
if dgraph_result.get("c_%s" % each_tag.get("tag_code"))
else 0
)
        # Total count matching the query conditions
rt_count = dgraph_result.get("rt_count")[0].get("count", 0) if dgraph_result.get("rt_count") else 0
rd_count = dgraph_result.get("rd_count")[0].get("count", 0) if dgraph_result.get("rd_count") else 0
tdw_count = dgraph_result.get("tdw_count")[0].get("count", 0) if dgraph_result.get("tdw_count") else 0
total_count = rt_count + rd_count + tdw_count
other_count = total_count
for each_tag_count in data_type_tag_list:
other_count -= each_tag_count.get("count")
        # Counts for each data-type tag
tag_code_list = [each_tag.get("tag_code") for each_tag in data_type_tag_list]
tag_alias_list = [each_tag.get("tag_alias") for each_tag in data_type_tag_list]
tag_count_list = [each_tag.get("count") for each_tag in data_type_tag_list]
if other_count > 0:
tag_count_list.append(other_count)
tag_code_list.append("other")
tag_alias_list.append(_("其他"))
return Response(
{
"tag_count_list": tag_count_list,
"tag_alias_list": tag_alias_list,
"tag_code_list": tag_code_list,
}
)
class SankeyDiagramView(APIViewSet):
@list_route(methods=["post"], url_path="distribution")
@params_valid(serializer=BasicListSerializer)
def sankey_diagram_distribution(self, request, params):
"""
        @api {post} /datamanage/datastocktake/sankey_diagram/distribution/ Get the sankey diagram
@apiGroup SankeyDiagramView
@apiName sankey_diagram_distribution
"""
params.pop("has_standard", None)
level = params.get("level", 4)
platform = params.get("platform", "all")
if platform == "tdw":
return Response(
{
"label": [],
"source": [],
"target": [],
"value": [],
"alias": [],
"other_app_code_list": [],
"level": 0,
}
)
        # 1. Query results for all data-source tags
result_dict = dmaction.floating_window_query_dgraph(
params,
connections["bkdata_basic_slave"],
dmaction.NEED_DATA_SET_ID_DETAIL,
sankey_diagram_distribute=True,
)
dgraph_result = result_dict.get("dgraph_result")
rt_count = dgraph_result.get("rt_count")[0].get("count") if dgraph_result.get("rt_count") else 0
if rt_count == 0:
return Response(
{
"label": [],
"source": [],
"target": [],
"value": [],
"alias": [],
"other_app_code_list": [],
"level": 0,
}
)
first_level_tag_list = result_dict.get("first_level_tag_list")
second_level_tag_list = result_dict.get("second_level_tag_list")
        # First-level labels
label = []
alias_list = []
for each_tag in first_level_tag_list:
if each_tag.get("tag_code") and each_tag.get("tag_alias"):
label.append(each_tag.get("tag_code"))
alias_list.append(each_tag.get("tag_alias"))
        # The "other" tag
label.append("other")
alias_list.append(_("其他"))
source = []
target = []
value = []
second_level_have_data_list = []
for each_tag in second_level_tag_list:
            # Second-level labels
if each_tag.get("tag_code") and each_tag.get("tag_code") not in label:
label.append(each_tag.get("tag_code"))
alias_list.append(each_tag.get("tag_alias"))
            # Link values between the first and second level
if dgraph_result.get("c_%s" % each_tag.get("tag_code")) and dgraph_result.get(
"c_%s" % each_tag.get("tag_code")
)[0].get("count"):
# Record which second-level nodes actually have data
if each_tag.get("tag_code") not in second_level_have_data_list:
second_level_have_data_list.append(each_tag.get("tag_code"))
source.append(label.index(each_tag.get("parent_code")))
target.append(label.index(each_tag.get("tag_code")))
value.append(dgraph_result.get("c_%s" % each_tag.get("tag_code"))[0].get("count"))
processing_type_dict = copy.deepcopy(dmaction.processing_type_dict)
processing_type_dict["batch_model"] = _("ModelFlow模型(batch)")
processing_type_dict["stream_model"] = _("ModelFlow模型(stream)")
fetch_value_between_source_target(
second_level_tag_list,
dgraph_result,
processing_type_dict,
label,
alias_list,
source,
target,
value,
)
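# fetch_value_between_source_target extends label/alias_list/source/target/value in place
# with the links from the second level to the processing_type (third) level.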
other_app_code_list = []
if level == 4:
other_app_code_list, real_level = format_sankey_diagram(
params,
request,
source,
target,
value,
level,
label,
alias_list,
processing_type_dict,
)
# A second-level node with no link to the third level would be rendered in the third layer, so add a minimal link to keep it in its own layer
for each_tag in second_level_have_data_list:
if each_tag in label and label.index(each_tag) not in source:
for each_processing_type in dmaction.processing_type_list:
if each_processing_type in label:
source.append(label.index(each_tag))
target.append(label.index(each_processing_type))
value.append(minimal_value)
break
# Keep the "other" category in the second layer by giving it an incoming link from the first layer
if "other" in label:
for each_tag in first_level_tag_list:
if each_tag.get("tag_code") in label and label.index(each_tag.get("tag_code")) in source:
source.append(label.index(each_tag.get("tag_code")))
target.append(label.index("other"))
value.append(minimal_value)
break
# If a processing_type has no incoming category link, use "other" as its input
for each_processing_type in dmaction.processing_type_list:
if (
each_processing_type in label
and label.index(each_processing_type) not in target
and "other" in label
and (label.index("other") in source or label.index("other") in target)
):
source.append(label.index("other"))
target.append(label.index(each_processing_type))
value.append(minimal_value)
# Check whether "other" is followed by a processing_type link
# Under some search conditions "other" has no data; without a link to a third-level processing_type it would drop to the last layer
if label.index("other") not in source:
# "other" is not a source yet; link it to the first processing_type present in the diagram
for each_processing_type in dmaction.processing_type_list:
if each_processing_type in label and label.index(each_processing_type) in target:
source.append(label.index("other"))
target.append(label.index(each_processing_type))
value.append(minimal_value)
break
return Response(
{
"label": label,
"source": source,
"target": target,
"value": value,
"alias": alias_list,
"other_app_code_list": other_app_code_list,
"level": real_level,
}
)
class TagTypeView(APIViewSet):
@list_route(methods=["get"], url_path="info")
def tag_type_info(self, request):
"""
@api {get} /datamanage/datastocktake/tag_type/info/ Get basic info for each tag type
@apiVersion 0.1.0
@apiGroup TagTypeView
@apiName tag_type_info
"""
sql = """select name, alias, description from tag_type_config limit 10"""
tag_type_list = tagaction.query_direct_sql_to_map_list(connections["bkdata_basic_slave"], sql)
# Translate the tag_type alias and description
for each_tag_type in tag_type_list:
each_tag_type["alias"] = bktranslates(each_tag_type["alias"])
each_tag_type["description"] = bktranslates(each_tag_type["description"])
return Response(tag_type_list)
class NodeCountDistributionView(APIViewSet):
@list_route(methods=["post"], url_path="distribution_filter")
@params_valid(serializer=DataValueSerializer)
def node_count_distribution_filter(self, request, params):
"""
@api {post} /datamanage/datastocktake/node_count/distribution_filter/ Distribution of successor node counts
@apiVersion 0.1.0
@apiGroup NodeCountDistributionView
@apiName node_count_distribution
"""
# Fetch heat/range (breadth) detail metrics from redis
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response({"x": [], "y": [], "z": []})
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response({"x": [], "y": [], "z": []})
num_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
num_list.append(each["node_count"])
# num_list = [each['node_count'] for each in metric_list]
x = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"10-15",
"15-20",
"20-25",
"25-30",
"30-40",
"40-50",
"50-100",
">100",
]
bins = [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
15,
20,
25,
30,
40,
50,
100,
np.inf,
]
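# pd.cut with right=False yields left-closed bins [a, b); value_counts(sort=False) keeps the
# bins in definition order so the counts line up with the x labels above.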
score_cat = pd.cut(num_list, bins, right=False)
bin_result_list = pd.value_counts(score_cat, sort=False).values.tolist()
sum_count = len(metric_list)
z = [round(each / float(sum_count), 7) for each in bin_result_list]
return Response({"x": x, "y": bin_result_list, "z": z})
class ScoreDistributionView(APIViewSet):
@list_route(methods=["post"], url_path="range_filter")
@params_valid(serializer=DataValueSerializer)
def range_score_distribution_filter(self, request, params):
"""
@api {post} /datamanage/datastocktake/score_distribution/range_filter/ Range (breadth) score distribution
@apiVersion 0.1.0
@apiGroup ScoreDistributionView
@apiName range_score_distribution
@apiSuccess (output) {String} data.y score, used as the y-axis data
@apiSuccess (output) {String} data.x index
@apiSuccess (output) {String} data.perc share of datasets at the current score
@apiSuccess (output) {String} data.cum_perc cumulative share of datasets with score >= the current score
@apiSuccess (output) {String} data.cnt bubble size
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":null,
"message":"ok",
"code":"1500200",
"data":{
"perc":[],
"cum_perc":[],
"cnt":[],
"y":[],
"x":[],
"z":[]
},
"result":true
}
"""
# Fetch heat/range (breadth) detail metrics from redis
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response(
{
"x": [],
"y": [],
"z": [],
"cnt": [],
"cum_perc": [],
"perc": [],
"80": 0,
"15": 0,
}
)
# has_filter_cond: whether any filter condition was provided
# filter_dataset_dict: the filtered result; {} both when nothing matches and when there is no filter condition
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response(
{
"x": [],
"y": [],
"z": [],
"cnt": [],
"cum_perc": [],
"perc": [],
"80": 0,
"15": 0,
}
)
range_score_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
range_score_list.append(
{
"count": each["dataset_id"],
"score": each["normalized_range_score"],
}
)
res_dict = score_aggregate(range_score_list)
return Response(res_dict)
@list_route(methods=["post"], url_path="heat_filter")
@params_valid(serializer=DataValueSerializer)
def heat_score_distribution_filter(self, request, params):
"""
@api {post} /datamanage/datastocktake/score_distribution/heat_filter/ Heat score distribution
@apiVersion 0.1.0
@apiGroup ScoreDistributionView
@apiName heat_score_distribution
@apiSuccess (output) {String} data.y score, used as the y-axis data
@apiSuccess (output) {String} data.x index
@apiSuccess (output) {String} data.perc share of datasets at the current score
@apiSuccess (output) {String} data.cum_perc cumulative share of datasets with score >= the current score
@apiSuccess (output) {String} data.cnt bubble size
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":null,
"message":"ok",
"code":"1500200",
"data":{
"perc":[],
"cum_perc":[],
"cnt":[],
"y":[],
"x":[],
"z":[]
},
"result":true
}
"""
# Fetch heat/range (breadth) detail metrics from redis
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response(
{
"x": [],
"y": [],
"z": [],
"cnt": [],
"cum_perc": [],
"perc": [],
"80": 0,
"15": 0,
}
)
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response(
{
"x": [],
"y": [],
"z": [],
"cnt": [],
"cum_perc": [],
"perc": [],
"80": 0,
"15": 0,
}
)
heat_score_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
heat_score_list.append({"count": each["dataset_id"], "score": each["heat_score"]})
res_dict = score_aggregate(heat_score_list)
return Response(res_dict)
@list_route(methods=["post"], url_path=r"(?P<score_type>\w+)")
@params_valid(serializer=DataValueSerializer)
def score_distribution(self, request, score_type, params):
"""
@api {post} /datamanage/datastocktake/score_distribution/:score_type/ Bubble chart of score distributions (asset value, value-to-cost ratio, heat, range, importance, etc.)
@apiVersion 0.1.0
@apiGroup Datastocktake/ScoreDistribution
@apiName score_distribution
@apiParam {String} score_type importance/heat/range/asset_value/assetvalue_to_cost
@apiSuccess (output) {String} data.y score, used as the y-axis data
@apiSuccess (output) {String} data.x index
@apiSuccess (output) {String} data.z number of datasets at the current score
@apiSuccess (output) {String} data.perc share of datasets at the current score
@apiSuccess (output) {String} data.cum_perc cumulative share of datasets with score >= the current score
@apiSuccess (output) {String} data.cnt bubble size
@apiParamExample {json} Request example:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[
],
"keyword":"",
"tag_code":"virtual_data_mart",
"me_type":"tag",
"has_standard":1,
"cal_type":[
"standard"
],
"data_set_type":"all",
"page":1,
"page_size":10,
"platform":"bk_data",
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":null,
"message":"ok",
"code":"1500200",
"data":{
"perc":[],
"cum_perc":[],
"cnt":[],
"y":[],
"x":[],
"z":[]
},
"result":true
}
"""
# Fetch lifecycle detail metrics from redis
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response({"x": [], "y": [], "z": [], "cnt": [], "cum_perc": [], "perc": []})
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response({"x": [], "y": [], "z": [], "cnt": [], "cum_perc": [], "perc": []})
score_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
if each[SCORE_DICT[score_type]] >= 0:
score_list.append(
{
"count": each["dataset_id"],
"score": each[SCORE_DICT[score_type]],
}
)
if not score_list:
return Response({"x": [], "y": [], "z": [], "cnt": [], "cum_perc": [], "perc": []})
res_dict = score_aggregate(score_list, score_type=score_type)
return Response(res_dict)
@list_route(methods=["post"], url_path=r"trend/(?P<score_type>\w+)")
@params_valid(serializer=DataValueSerializer)
def score_trend(self, request, score_type, params):
"""
@api {post} /datamanage/datastocktake/score_distribution/trend/:score_type/ Score trend (asset value, importance, etc.)
@apiVersion 0.1.0
@apiGroup Datastocktake/ScoreDistribution
@apiName score_trend
@apiParam {String} score_type lifecycle score metric: importance/heat/range/asset_value/assetvalue_to_cost
@apiParamExample {json} Request example:
HTTP/1.1 http://{domain}/v3/datamanage/datastocktake/score_distribution/trend/importance/
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[
],
"keyword":"",
"tag_code":"virtual_data_mart",
"me_type":"tag",
"has_standard":1,
"cal_type":[
"standard"
],
"data_set_type":"all",
"page":1,
"page_size":10,
"platform":"bk_data",
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": [
{
"index": 0,
"num": [
10513,
10383,
10323,
10147,
10243,
10147,
10357,
10147
],
"score_level": "[0,25]",
"time": [
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08",
"08-11"
]
},
{
"index": 1,
"num": [
37118,
37177,
37222,
37349,
37279,
37349,
37203,
37349
],
"score_level": "(25,50]",
"time": [
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08",
"08-11"
]
},
{
"index": 2,
"num": [
37119,
37179,
37181,
37205,
37189,
37205,
37179,
37205
],
"score_level": "(50,80]",
"time": [
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08",
"08-11"
]
},
{
"index": 3,
"num": [
7691,
7702,
7715,
7740,
7730,
7740,
7702,
7740
],
"score_level": "(80,100]",
"time": [
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08",
"08-11"
]
}
],
"result": true
}
"""
# 1)get data_trend_df from redis
data_trend_df = cache.get("data_trend_df")
platform = params.get("platform", "all")
# data_trend_df
if data_trend_df.empty or platform == "tdw":
return Response([])
# 2) Check whether there are search conditions & which datasets they hit
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response([])
# 3) Score trend
ret = score_trend_pandas_groupby(data_trend_df, filter_dataset_dict, score_type)
return Response(ret)
@list_route(methods=["post"], url_path=r"level_and_trend/(?P<score_type>\w+)")
@params_valid(serializer=DataValueSerializer)
def score_level_and_trend(self, request, score_type, params):
"""
@api {post} /datamanage/datastocktake/score_distribution/level_and_trend/:score_type/ Score level distribution & score trend (asset value, importance, etc.)
@apiVersion 0.1.0
@apiGroup Datastocktake/ScoreDistribution
@apiName score_level_and_trend
@apiParam {String} score_type lifecycle score metric: importance/heat/range/asset_value/assetvalue_to_cost
@apiParamExample {json} Request example:
HTTP/1.1 http://{domain}/v3/datamanage/datastocktake/score_distribution/level_and_trend/importance/
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[
],
"keyword":"",
"tag_code":"virtual_data_mart",
"me_type":"tag",
"has_standard":1,
"cal_type":[
"standard"
],
"data_set_type":"all",
"page":1,
"page_size":10,
"platform":"bk_data",
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":null,
"message":"ok",
"code":"1500200",
"data":{
"score_trend":[
{
"index":0,
"num":[
10513,
10383,
10323,
10147,
10243,
10147,
10357
],
"score_level":"[0,25]",
"time":[
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08"
]
},
{
"index":1,
"num":[
37118,
37177,
37222,
37349,
37279,
37349,
37203
],
"score_level":"(25,50]",
"time":[
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08"
]
},
{
"index":2,
"num":[
37119,
37179,
37181,
37205,
37189,
37205,
37179
],
"score_level":"(50,80]",
"time":[
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08"
]
},
{
"index":3,
"num":[
7691,
7702,
7715,
7740,
7730,
7740,
7702
],
"score_level":"(80,100]",
"time":[
"08-06",
"08-07",
"08-09",
"08-12",
"08-10",
"08-13",
"08-08"
]
}
],
"level_distribution":{
"y":[
245,
8010,
1887,
1657
],
"x":[
"[0,10]",
"(10,50]",
"(50,75]",
"(75,100]"
],
"sum_count":11799,
"z":[
0.0207645,
0.6788711,
0.1599288,
0.1404356
]
}
},
"result":true
}
"""
metric_list = cache.get("data_value_stocktake")
data_trend_df = cache.get("data_trend_df")
platform = params.get("platform", "all")
if (not metric_list and data_trend_df.empty) or platform == "tdw":
return Response(
{
"score_trend": [],
"level_distribution": {"x": [], "y": [], "z": [], "sum_count": 0},
}
)
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response(
{
"score_trend": [],
"level_distribution": {"x": [], "y": [], "z": [], "sum_count": 0},
}
)
# 1) Score level distribution
if not metric_list:
x = []
bin_result_list = []
z = []
sum_count = 0
else:
x, bin_result_list, z, sum_count = level_distribution(metric_list, filter_dataset_dict, score_type)
# 2) Score trend
if data_trend_df.empty:
trend_ret_list = []
else:
trend_ret_list = score_trend_pandas_groupby(data_trend_df, filter_dataset_dict, score_type)
return Response(
{
"score_trend": trend_ret_list,
"level_distribution": {
"x": x,
"y": bin_result_list,
"z": z,
"sum_count": sum_count,
},
}
)
class LevelDistributionView(APIViewSet):
@list_route(methods=["post"], url_path=r"(?P<score_type>\w+)")
@params_valid(serializer=DataValueSerializer)
def level_distribution(self, request, score_type, params):
"""
@api {post} /datamanage/datastocktake/level_distribution/:score_type/ Score level distribution (asset value, importance, etc.)
@apiVersion 0.1.0
@apiGroup Datastocktake/LevelDistribution
@apiName level_distribution
@apiParam {String} score_type lifecycle score metric: importance/heat/range/asset_value/assetvalue_to_cost
@apiSuccess (output) {String} data.x level interval
@apiSuccess (output) {String} data.y number of datasets
@apiSuccess (output) {String} data.z share of datasets
@apiSuccess (output) {String} data.sum_count total number of data tables
@apiParamExample {json} Request example:
HTTP/1.1 http://{domain}/v3/datamanage/datastocktake/level_distribution/importance/
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[
],
"keyword":"",
"tag_code":"virtual_data_mart",
"me_type":"tag",
"has_standard":1,
"cal_type":[
"standard"
],
"data_set_type":"all",
"page":1,
"page_size":10,
"platform":"bk_data",
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"y": [
245,
8010,
1887,
1657
],
"x": [
"[0,10]",
"(10,50]",
"(50,75]",
"(75,100]"
],
"sum_count": 11799,
"z": [
0.0207645,
0.6788711,
0.1599288,
0.1404356
]
},
"result": true
}
"""
# Fetch heat/range (breadth) detail metrics from redis
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response({"x": [], "y": [], "z": [], "sum_count": 0})
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response({"x": [], "y": [], "z": [], "sum_count": 0})
x, bin_result_list, z, sum_count = level_distribution(metric_list, filter_dataset_dict, score_type)
return Response({"x": x, "y": bin_result_list, "z": z, "sum_count": sum_count})
class CostDistribution(APIViewSet):
@list_route(methods=["post"], url_path="storage_capacity")
@params_valid(serializer=DataValueSerializer)
def storage_capacity(self, request, params):
"""
@api {post} /datamanage/datastocktake/cost_distribution/storage_capacity/ Storage distribution
@apiVersion 0.1.0
@apiGroup Datastocktake/CostDistribution
@apiName storage_capacity
@apiSuccess (output) {String} data.capacity_list capacity per storage type
@apiSuccess (output) {String} data.label storage type names
@apiSuccess (output) {String} data.sum_capacity total storage capacity
@apiSuccess (output) {String} data.unit storage unit, scaled automatically
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"capacity_list": [],
"label": [],
"sum_capacity": 0,
"unit": "TB"
},
"result": true
}
"""
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response({"capacity_list": [], "label": [], "sum_capacity": 0, "unit": "MB"})
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response({"capacity_list": [], "label": [], "sum_capacity": 0, "unit": "MB"})
hdfs_list = []
tspider_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
hdfs_list.append(each["hdfs_capacity"])
tspider_list.append(each["tspider_capacity"])
sum_hdfs = sum(hdfs_list)
sum_tspider = sum(tspider_list)
sum_capacity = sum_hdfs + sum_tspider
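# hum_storage_unit picks a human-readable unit for the total; power is taken here as the
# 1024 exponent of that unit, so dividing by 1024**power expresses the values in it.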
format_max_capacity, unit, power = hum_storage_unit(sum_capacity, return_unit=True)
sum_hdfs = round(sum_hdfs / float(1024 ** power), 3)
sum_tspider = round(sum_tspider / float(1024 ** power), 3)
sum_capacity = round(sum_capacity / float(1024 ** power), 3)
capacity_list = [sum_hdfs, sum_tspider]
label = ["hdfs", "tspider"]
return Response(
{
"capacity_list": capacity_list,
"label": label,
"sum_capacity": sum_capacity,
"unit": unit,
}
)
@list_route(methods=["post"], url_path="storage_trend")
@params_valid(serializer=DataValueSerializer)
def storage_trend(self, request, params):
"""
@api {post} /datamanage/datastocktake/cost_distribution/storage_trend/ Storage cost trend
@apiVersion 0.1.0
@apiGroup Datastocktake/CostDistribution
@apiName storage_trend
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"tspider_capacity": [
23,
23,
23,
23,
23,
23
],
"hdfs_capacity": [
1000,
1000,
1000,
1000,
1000,
1000
],
"total_capacity": [
1023,
1023,
1023,
1023,
1023,
1023
],
"unit": "TB",
"time": [
"08-13",
"08-14",
"08-15",
"08-16",
"08-17",
"08-18"
]
},
"result": true
}
"""
data_trend_df = cache.get("data_trend_df")
platform = params.get("platform", "all")
if data_trend_df.empty or platform == "tdw":
return Response(
{
"tspider_capacity": [],
"hdfs_capacity": [],
"total_capacity": [],
"time": [],
"unit": "B",
}
)
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response(
{
"tspider_capacity": [],
"hdfs_capacity": [],
"total_capacity": [],
"time": [],
"unit": "B",
}
)
ret_dict = storage_capacity_trend(data_trend_df, filter_dataset_dict)
return Response(ret_dict)
class ImportanceDistribution(APIViewSet):
@list_route(methods=["post"], url_path=r"(?P<metric_type>\w+)")
@params_valid(serializer=DataValueSerializer)
def importance_metric_distribution(self, request, metric_type, params):
"""
@api {post} /datamanage/datastocktake/importance_distribution/:metric_type/
Distribution of business importance level, BIP association, project operation state, data sensitivity and generate type
@apiVersion 0.1.0
@apiGroup Datastocktake/ImportanceDistribution
@apiName importance_metric_distribution
@apiParam {String} metric_type importance-related metric: app_important_level_name/is_bip/active/sensitivity/generate_type
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors":null,
"message":"ok",
"code":"1500200",
"data":{
"dataset_count":[
10274,
1557
],
"metric":[
false,
true
],
"dataset_perct":[
0.8683966,
0.1316034
],
"biz_count":[
317,
30
],
"biz_perct":[
0.9135447,
0.0864553
]
},
"result":true
}
"""
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response({"metric": [], "dataset_count": [], "dataset_perct": []})
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response({"metric": [], "dataset_count": [], "dataset_perct": []})
importance_metric_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
if metric_type not in list(METRIC_DICT.keys()):
importance_metric_list.append({"dataset_id": each["dataset_id"], "metric": each[metric_type]})
else:
importance_metric_list.append(
{
"dataset_id": each["dataset_id"],
"metric": each[metric_type],
METRIC_DICT[metric_type]: each[METRIC_DICT[metric_type]],
}
)
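# The groupby below (df1 -> df2) counts datasets per metric value; when the metric has a
# secondary dimension in METRIC_DICT (bk_biz_id or project), df3/df4 further count distinct
# entities per metric value for the *_count/*_perct outputs.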
df1 = DataFrame(importance_metric_list)
df2 = df1.groupby("metric", as_index=False).count().sort_values(["metric"], axis=0, ascending=True)
metric_count_agg_list = df2.to_dict(orient="records")
metric = [each["metric"] for each in metric_count_agg_list]
dataset_count = [each["dataset_id"] for each in metric_count_agg_list]
metric_sum = len(metric_list)
dataset_perct = [
round(each["dataset_id"] / float(metric_sum), 7) if metric_sum else 0 for each in metric_count_agg_list
]
if metric_type in list(METRIC_DICT.keys()):
df3 = (
df1.groupby(["metric", METRIC_DICT[metric_type]], as_index=False)
.count()
.sort_values(["metric"], axis=0, ascending=True)
)
df4 = df3.groupby("metric", as_index=False).count().sort_values(["metric"], axis=0, ascending=True)
count_agg_list = df4.to_dict(orient="records")
count = [each["dataset_id"] for each in count_agg_list]
count_sum = sum(count)
perct = [round(each["dataset_id"] / float(count_sum), 7) if count_sum else 0 for each in count_agg_list]
if METRIC_DICT[metric_type] == "bk_biz_id":
return Response(
{
"metric": metric,
"dataset_count": dataset_count,
"dataset_perct": dataset_perct,
"biz_count": count,
"biz_perct": perct,
}
)
else:
return Response(
{
"metric": metric,
"dataset_count": dataset_count,
"dataset_perct": dataset_perct,
"project_count": count,
"project_perct": perct,
}
)
return Response(
{
"metric": metric,
"dataset_count": dataset_count,
"dataset_perct": dataset_perct,
}
)
@list_route(methods=["post"], url_path="biz")
@params_valid(serializer=DataValueSerializer)
def bip_grade_and_oper_state_distr(self, request, params):
"""
@api {post} /datamanage/datastocktake/importance_distribution/biz/ Distribution of business star rating & operation state
@apiVersion 0.1.0
@apiGroup Datastocktake/ImportanceDistribution
@apiName bip_grade_and_oper_state_distribution
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"dataset_count": [
148
],
"oper_state_name": [
"不删档"
],
"biz_count": [
6
],
"bip_grade_name": [
"三星"
]
},
"result": true
}
"""
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response(
{
"dataset_count": [],
"oper_state_name": [],
"biz_count": [],
"bip_grade_name": [],
}
)
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response(
{
"dataset_count": [],
"oper_state_name": [],
"biz_count": [],
"bip_grade_name": [],
}
)
biz_metric_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
bip_grade_name = (
each["bip_grade_name"]
if each["bip_grade_name"] not in ABN_BIP_GRADE_NAME_LIST
else CORR_BIP_GRADE_NAME
)
biz_metric_list.append(
{
"dataset_id": each["dataset_id"],
"oper_state_name": each["oper_state_name"],
"bip_grade_name": bip_grade_name,
"bk_biz_id": each["bk_biz_id"],
}
)
df1 = DataFrame(biz_metric_list)
# Aggregate by oper_state_name and bip_grade_name
df2 = df1.groupby(["oper_state_name", "bip_grade_name"], as_index=False).count()
# Custom sort order: sort by oper_state_name and bip_grade_name
df2["oper_state_name"] = df2["oper_state_name"].astype("category").cat.set_categories(OPER_STATE_NAME_ORDER)
df2["bip_grade_name"] = df2["bip_grade_name"].astype("category").cat.set_categories(BIP_GRADE_NAME_ORDER)
df2 = df2.dropna()
df2 = df2.sort_values(by=["oper_state_name", "bip_grade_name"], ascending=True)
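# Values not listed in OPER_STATE_NAME_ORDER / BIP_GRADE_NAME_ORDER become NaN after
# set_categories, so dropna() filters them out before sorting.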
metric_count_agg_list = df2.to_dict(orient="records")
oper_state_name = [each["oper_state_name"] for each in metric_count_agg_list]
bip_grade_name = [each["bip_grade_name"] for each in metric_count_agg_list]
dataset_count = [each["dataset_id"] for each in metric_count_agg_list]
df3 = df1.groupby(["oper_state_name", "bip_grade_name", "bk_biz_id"], as_index=False).count()
df4 = df3.groupby(["oper_state_name", "bip_grade_name"], as_index=False).count()
# Custom sort order: sort by oper_state_name and bip_grade_name
df4["oper_state_name"] = df4["oper_state_name"].astype("category").cat.set_categories(OPER_STATE_NAME_ORDER)
df4["bip_grade_name"] = df4["bip_grade_name"].astype("category").cat.set_categories(BIP_GRADE_NAME_ORDER)
df4 = df4.dropna()
df4 = df4.sort_values(by=["oper_state_name", "bip_grade_name"], ascending=True)
count_agg_list = df4.to_dict(orient="records")
biz_count = [each["dataset_id"] for each in count_agg_list]
return Response(
{
"oper_state_name": oper_state_name,
"bip_grade_name": bip_grade_name,
"dataset_count": dataset_count,
"biz_count": biz_count,
}
)
class QueryCountDistributionView(APIViewSet):
@list_route(methods=["post"], url_path="distribution_filter")
@params_valid(serializer=DataValueSerializer)
def day_query_count_distribution_filter(self, request, params):
"""
@api {post} /datamanage/datastocktake/day_query_count/distribution_filter/ Distribution of daily query counts
@apiVersion 0.1.0
@apiGroup QueryCountDistributionView
@apiName day_query_count_distribution
"""
# Fetch heat/range (breadth) detail metrics from redis
metric_list = cache.get("data_value_stocktake")
platform = params.get("platform", "all")
if (not metric_list) or platform == "tdw":
return Response({"x": [], "y": [], "z": []})
has_filter_cond, filter_dataset_dict = dataset_filter(params, request)
if has_filter_cond and not filter_dataset_dict:
return Response({"x": [], "y": [], "z": []})
num_list = []
for each in metric_list:
if not filter_dataset_dict or each["dataset_id"] in filter_dataset_dict:
num_list.append(int(each["query_count"] / 7.0))
# num_list = [int(each['query_count']/7.0) for each in metric_list]
x = [
"0",
"1",
"2",
"3",
"4",
"5",
"5-10",
"10-20",
"20-30",
"30-40",
"40-50",
"50-100",
"100-500",
"500-1000",
"1000-5000",
">5000",
]
bins = [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 500, 1000, 5000, np.inf]
score_cat = pd.cut(num_list, bins, right=False)
"""
Functions for extracting information from WI tables.
"""
from typing import Union
import numpy as np
import pandas as pd
from . import _labels, _utils
def get_date_ranges(
images: pd.DataFrame = None,
deployments: pd.DataFrame = None,
source: str = "both",
compute_delta: bool = False,
pivot: bool = False,
) -> pd.DataFrame:
"""
Gets deployment date ranges using information from either images,
deployments or both.
Parameters
----------
images : DataFrame
DataFrame with the project's images.
deployments : DataFrame
DataFrame with the project's deployments.
source : str
Source to take date ranges from. Values can be:
- 'images' to get date ranges from images (i.e. first image
to last image taken).
- 'deployments' to get date ranges from deployments
information (i.e. start date and end date).
- 'both' to get date ranges from both sources.
compute_delta : bool
Whether to compute the delta (in days) between the start and end
dates.
pivot : bool
Whether to pivot (reshape from long to wide format) the resulting
DataFrame.
Returns
-------
DataFrame
DataFrame with date ranges.
"""
df = pd.DataFrame()
if source == "images" or source == "both":
if images is None:
raise ValueError("images DataFrame must be provided.")
images = images.copy()
images[_labels.images.date] = pd.to_datetime(images[_labels.images.date])
images[_labels.images.date] = pd.to_datetime(
images[_labels.images.date].dt.date
)
dates = images.groupby(_labels.images.deployment_id)[_labels.images.date].agg(
start_date="min", end_date="max"
)
dates["source"] = "images"
df = pd.concat([df, dates.reset_index()], ignore_index=True)
if source == "deployments" or source == "both":
if deployments is None:
raise ValueError("deployments DataFrame must be provided.")
deployments = deployments.copy()
deployments[_labels.deployments.start] = pd.to_datetime(
deployments[_labels.deployments.start]
)
deployments[_labels.deployments.end] = pd.to_datetime(
deployments[_labels.deployments.end]
)
dates = deployments.loc[
:,
[
_labels.deployments.deployment_id,
_labels.deployments.start,
_labels.deployments.end,
],
]
dates["source"] = "deployments"
df = pd.concat([df, dates], ignore_index=True)
"""
This file is the user interface for the analysis code in this folder. You can execute the code line-by-line to see the steps or execute it altogether to get the output figures produced in the `figures` folder.
Make sure you deactivate the first `try... except...` block if you are launching the code from the terminal
"""
# Allow on-the-fly reloading of the modified project files for iPython-like environments. Disable if you are running in the console
# try:
# has_run
# except NameError:
# %matplotlib tk
# %load_ext autoreload
# %autoreload 2
# has_run = 1
import copy
# Package imports
import os
import matplotlib
import numpy as np
import pandas as pd
from tqdm import trange
# from Dataset import detect_timestep
from calculate import (calculate_alpha, calculate_free_travel_time,
calculate_rho_and_J, calculate_slopes, filter_by_AP,
identify_ncs, perform_welchs_test)
from constants import (AP_hist_folder, data_folder, figures_folder,
matlab_csv_data_file, output_slopes_folder, s13_hb_bac)
from plot import (plot_j_alpha_curve, plot_normalized_current_density_diagram,
plot_parameter_evolution)
from support import reinit_folder
# Do not show figures, just save to files
matplotlib.use('Agg')
reinit_folder(figures_folder)
# Constants
ncs = range(11, 15)
# %% Import
filepath = os.path.join(data_folder, matlab_csv_data_file)
data = pd.read_csv(filepath, sep=',', encoding='utf-8')
print('Loaded columns: ', data.columns.values)
# Calculate the mean AP position for each trace
data['ap_mean'] = data.ap.groupby(data['trace_id']).transform('mean')
# Detect the range of data sets and ncs present in the input file
datasets_len = data.dataset_id.max() + 1
datasets = set(data.dataset_id)
genes = set(data.gene)
# %% Perform AP filtering
filtered_data = filter_by_AP(data)
# %% Detect nuclear cycles
data_with_ncs, nc_limits = identify_ncs(filtered_data)
print('Time limits for each data set:\n', nc_limits)
# %% Initialize the data structure to store results
index = pd.MultiIndex.from_product((datasets, ncs), names=['dataset_id', 'nc'])
analyses = pd.DataFrame(columns=['dataset_name', 'gene', 'gene_id',
'construct'], index=index)
# Copy dataset names, genes and constructs
for id in datasets:
analyses.loc[id, 'gene'] = data[data.dataset_id == id].head(1).gene.values[0]
analyses.loc[id, 'gene_id'] = data[data.dataset_id == id].head(1).gene_id.values[0]
analyses.loc[id, 'construct'] = data[data.dataset_id == id].head(1).construct.values[0]
analyses.loc[id, 'dataset_name'] = data[data.dataset_id == id].head(1).dataset.values[0]
analyses = pd.concat([analyses, nc_limits], axis='columns')
import pandas as pd
pd.set_option('mode.chained_assignment',None)
###################################################
#################### ####################
#################### ANALYTICS ####################
#################### ####################
###################################################
def open_season_data(year, league):
data = pd.read_csv('data/{}/{}_season_{}.csv'.format(league, league, year), parse_dates=['date'], index_col='date').drop(columns=['Unnamed: 0'])
return data
def open_squads_data(year, league):
data = pd.read_csv('data/{}/squads_info_{}.csv'.format(league, year)).drop(columns=['Unnamed: 0'])
return data
def open_value_squad_position_data(year, league):
data = pd.read_csv('data/{}/value_squad_position_{}.csv'.format(league, year)).drop(columns=['Unnamed: 0'])
return data
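# Minimal usage sketch for the loaders above; the league name and year (and the matching
# CSV files under data/) are assumptions for illustration, not part of this module.
if __name__ == "__main__":
    season = open_season_data(2019, "epl")   # expects data/epl/epl_season_2019.csv
    squads = open_squads_data(2019, "epl")   # expects data/epl/squads_info_2019.csv
    print(season.head())
    print(squads.head())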
###################################################
def goals_scored_avg_time(team, year, league, home=False, away=False, all=False):
data = open_season_data(year, league)
if home == True:
result = data[(data.h_team == team) & (data.result == 'Goal') & (data.key == team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[result[(result.h_team == team) & (result.result == 'Goal') & (result.key == team)].index,
result.match,
result.matchday]
).minute.mean()
return result
if away == True:
result = data[(data.a_team == team) & (data.result == 'Goal') & (data.key == team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[result[(result.a_team == team) & (result.result == 'Goal') & (result.key == team)].index,
result.match,
result.matchday]
).minute.mean()
return result
if all == True:
result_home = data[(data.h_team == team) & (data.result == 'Goal') & (data.key == team)]
result_home['match'] = result_home['h_team'] + " - " + result_home['a_team']
result_home = result_home.groupby(
[result_home[(result_home.h_team == team) & (result_home.result == 'Goal') & (result_home.key == team)].index,
result_home.match,
result_home.matchday]
).minute.mean()
result_away = data[(data.a_team == team) & (data.result == 'Goal') & (data.key == team)]
result_away['match'] = result_away['h_team'] + " - " + result_away['a_team']
result_away = result_away.groupby(
[result_away[(result_away.a_team == team) & (result_away.result == 'Goal') & (result_away.key == team)].index,
result_away.match,
result_away.matchday]
).minute.mean()
result_all = result_home.append(result_away)#.sort_index(ascending=True)
result_all = pd.DataFrame(result_all).reset_index()
return result_all
def goals_scored(team, year, league, home=False, away=False, all=False):
data = open_season_data(year, league)
if home == True:
result = data[(data.h_team == team) & (data.result == 'Goal') & (data.key == team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[result[(result.h_team == team) & (result.result == 'Goal') & (result.key == team)].index,
result.match,
result.matchday]
).h_goals.mean()
return result
if away == True:
result = data[(data.a_team == team) & (data.result == 'Goal') & (data.key == team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[result[(result.a_team == team) & (result.result == 'Goal') & (result.key == team)].index,
result.match,
result.matchday]
).a_goals.mean()  # away fixture: the team's own goals are a_goals
return result
if all == True:
result_home = data[(data.h_team == team) & (data.result == 'Goal') & (data.key == team)]
result_home['match'] = result_home['h_team'] + " - " + result_home['a_team']
result_home = result_home.groupby(
[result_home[(result_home.h_team == team) & (result_home.result == 'Goal') & (result_home.key == team)].index,
result_home.match,
result_home.matchday]
).h_goals.mean()
result_away = data[(data.a_team == team) & (data.result == 'Goal') & (data.key == team)]
result_away['match'] = result_away['h_team'] + " - " + result_away['a_team']
result_away = result_away.groupby(
[result_away[(result_away.a_team == team) & (result_away.result == 'Goal') & (result_away.key == team)].index,
result_away.match,
result_away.matchday]
).a_goals.mean()  # away fixture: the team's own goals are a_goals
result_all = result_home.append(result_away).sort_index(ascending=True)
result_all = pd.DataFrame(result_all).reset_index()
return result_all
def goals_conceded_avg_time(team, year, league, home=False, away=False, all=False):
data = open_season_data(year, league)
if home == True:
result = data[(data.h_team == team) & (data.result == 'Goal') & (data.key != team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[result[(result.h_team == team) & (result.result == 'Goal') & (result.key != team)].index,
result.match,
result.matchday]
).minute.mean()
return result
if away == True:
result = data[(data.a_team == team) & (data.result == 'Goal') & (data.key != team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[result[(result.a_team == team) & (result.result == 'Goal') & (result.key != team)].index,
result.match,
result.matchday]
).minute.mean()
return result
if all == True:
result_home = data[(data.h_team == team) & (data.result == 'Goal') & (data.key != team)]
result_home['match'] = result_home['h_team'] + " - " + result_home['a_team']
result_home = result_home.groupby(
[result_home[(result_home.h_team == team) & (result_home.result == 'Goal') & (result_home.key != team)].index,
result_home.match,
result_home.matchday]
).minute.mean()
result_away = data[(data.a_team == team) & (data.result == 'Goal') & (data.key != team)]
result_away['match'] = result_away['h_team'] + " - " + result_away['a_team']
result_away = result_away.groupby(
[result_away[(result_away.a_team == team) & (result_away.result == 'Goal') & (result_away.key != team)].index,
result_away.match,
result_away.matchday]
).minute.mean()
result_all = result_home.append(result_away)#.sort_index(ascending=True)
result_all = pd.DataFrame(result_all).reset_index()
return result_all
def goals_conceded(team, year, league, home=False, away=False, all=False):
data = open_season_data(year, league)
if home == True:
result = data[(data.h_team == team) & (data.result == 'Goal') & (data.key != team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[data[(data.h_team == team) & (data.result == 'Goal') & (data.key != team)].index,
result.match,
result.matchday]
).a_goals.mean()
return result
if away == True:
result = data[(data.a_team == team) & (data.result == 'Goal') & (data.key != team)]
result['match'] = result['h_team'] + " - " + result['a_team']
result = result.groupby(
[data[(data.a_team == team) & (data.result == 'Goal') & (data.key != team)].index,
result.match,
result.matchday]
).h_goals.mean()
return result
if all == True:
result_home = data[(data.h_team == team) & (data.result == 'Goal') & (data.key != team)]
result_home['match'] = result_home['h_team'] + " - " + result_home['a_team']
result_home = result_home.groupby(
[data[(data.h_team == team) & (data.result == 'Goal') & (data.key != team)].index,
result_home.match,
result_home.matchday]
).a_goals.mean()
result_away = data[(data.a_team == team) & (data.result == 'Goal') & (data.key != team)]
result_away['match'] = result_away['h_team'] + " - " + result_away['a_team']
result_away = result_away.groupby(
[data[(data.a_team == team) & (data.result == 'Goal') & (data.key != team)].index,
result_away.match,
result_away.matchday]
).h_goals.mean()
result_all = result_home.append(result_away).sort_index(ascending=True)
result_all = pd.DataFrame(result_all).reset_index()
return result_all
# load
import pandas as pd
# import lightgbm
data = pd.read_csv("X_train.csv", index_col=0)
data["mark"] = pd.read_csv("y_train.csv", index_col=0)["mark"]
stud_info = pd.read_csv("studs_info.csv", index_col=False)
X_validation = pd.read_csv("X_test.csv", index_col=0)
# rename columns
field_map = {
"STD_ID": "stud",
"НАПРАВЛЕНИЕ": "profession",
"ГОД": "year",
"АТТЕСТАЦИЯ": "exam_type",
"ДИСЦИПЛИНА": "discipline",
"КУРС": "course",
"СЕМЕСТР": "semester",
" number": "number",
"Пол": "sex",
"Статус": "state",
"Дата выпуска": "release_date",
"Категория обучения": "category",
"Форма обучения": "study_kind",
"Шифр": "cipher",
"направление (специальность)": "speciality",
" ": "what?",
"Образование": "lvl_of_education",
"Дата выдачи": "issue_date",
"Что именно закончил": "education",
}
data.rename(columns=field_map, inplace=True)
X_validation.rename(columns=field_map, inplace=True)
stud_info.rename(columns=field_map, inplace=True)
stud_info.drop(stud_info[stud_info["stud"] == 92222].index, inplace=True)
# stud_info[np.isin(stud_info["number"], range(850, 900))].sort_values(by=["stud"])
# all(stud_info.groupby("speciality")["cipher"].nunique().eq(1))# and
all(stud_info.groupby("cipher")["speciality"].nunique().eq(1))
g = stud_info.groupby("speciality")["cipher"].nunique()
print(g[g != 1])
set(stud_info[stud_info["speciality"] == "Журналистика"]["cipher"])
# 203283
# remove duplicate entries (older ones)
stud_info = stud_info.sort_values(by=["stud", "issue_date"], na_position="first")
stud_info.drop_duplicates(subset=["stud"], keep="last", inplace=True)
import numpy as np
assert len(stud_info[np.isin(stud_info["stud"], stud_info[stud_info.duplicated(subset=["stud"])])]) == 0
# clean up
# for each stud: year == course + const
# for each stud: course == ceil(semester / 2)
# therefore they are noise
fields = ["year", "course"]
data.drop(fields, axis=1, inplace=True)
X_validation.drop(fields, axis=1, inplace=True)
# all nulls and not present in data / validation
stud_info.drop(stud_info[stud_info["stud"] == 92222].index, inplace=True)
# for each stud: all number_s are equal
assert all(stud_info.groupby("number")["stud"].nunique().le(1)) and all(stud_info.groupby("stud")["number"].nunique().le(1))
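# 'number' is redundant given the 1:1 check above; the two date columns are dropped as well
# (assumed not needed for the remaining preprocessing).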
fields = ["number", "issue_date", "release_date"]
stud_info.drop(fields, axis=1, inplace=True)
{
# ('НС', 'СР'): 4,
# ('ОСН', 'СР'): 3,
# ('НС', 'СП'): 5,
# ('СР', 'СП'): 111,
# ('ОСН', 'СП'): 24,
# ('ОО', 'СР'): 22,
# ('ОО', 'СП'): 131,
('НП', 'СР'): 1,
('НП', 'СП'): 10,
('СП', 'СП'): 7,
('СР', 'СР', 'СП'): 1,
('СР', 'СР'): 1,
('СП', 'СР'): 1,
('СП', 'НП'): 1}
# ('ОО', 'СР' )
# ( 'СР', 'СП')
# ('ОО', 'СП')
# ('ОО', 'СР', 'СП')
# ( 'ОСН', 'СР' )
# ( 'ОСН', 'СП' )
('ОО', 'ОСН', 'СР', 'СП')
('НС', 'СР' )
('НС', 'СП')
# # SeriesGroupBy.cummax()
stud_info
stud_info.fillna({"lvl_of_education": "НЕТ", "what?": 0.0}, inplace=True)
data = data.merge(stud_info, how="left", on="stud")
X_validation = X_validation.merge(stud_info, how="left", on="stud")
data
# encode labels
from sklearn import preprocessing
fields = ["discipline", "profession", "exam_type", "sex", "category", "speciality", "education", "state", "cipher"]
le_s = {
field_name: preprocessing.LabelEncoder().fit(pd.concat([data[field_name], X_validation[field_name]]))
for field_name in fields
}
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from scipy.stats import chi2
from sklearn.linear_model import LogisticRegression
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import pandasDF2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.function.extraction import one_hot_encoder
from brightics.common.validation import raise_error
import sklearn.utils as sklearn_utils
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.classify_input_type import check_col_type
def logistic_regression_train(table, group_by=None, **params):
check_required_parameters(_logistic_regression_train, params, ['table'])
params = get_default_from_parameters_if_required(params, _logistic_regression_train)
param_validation_check = [greater_than(params, 0.0, 'C'),
greater_than_or_equal_to(params, 1, 'max_iter'),
greater_than(params, 0.0, 'tol')]
validate(*param_validation_check)
if group_by is not None:
grouped_model = _function_by_group(_logistic_regression_train, table, group_by=group_by, **params)
return grouped_model
else:
return _logistic_regression_train(table, **params)
def _logistic_regression_train(table, feature_cols, label_col, penalty='l2', dual=False, tol=0.0001, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None,
solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, warm_start=False,
n_jobs=1):
feature_names, features = check_col_type(table, feature_cols)
features = pd.DataFrame(features, columns=feature_names)
label = table[label_col]
if(sklearn_utils.multiclass.type_of_target(label) == 'continuous'):
raise_error('0718', 'label_col')
class_labels = sorted(set(label))
if class_weight is not None:
if len(class_weight) != len(class_labels):
raise ValueError("Number of class weights should match number of labels.")
else:
class_weight = {class_labels[i] : class_weight[i] for i in range(len(class_labels))}
lr_model = LogisticRegression(penalty, dual, tol, C, fit_intercept, intercept_scaling, class_weight, random_state,
solver, max_iter, multi_class, verbose, warm_start, n_jobs)
lr_model.fit(features, label)
intercept = lr_model.intercept_
coefficients = lr_model.coef_
classes = lr_model.classes_
is_binary = len(classes) == 2
prob = lr_model.predict_proba(features)
prob_trans = prob.T
classes_dict = dict()
for i in range(len(classes)):
classes_dict[classes[i]] = i
tmp_label = np.array([classes_dict[i] for i in label])
likelihood = 1
for i in range(len(table)):
likelihood *= prob_trans[tmp_label[i]][i]
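# The likelihood multiplies each row's predicted probability of its observed class
# (note this can underflow for large tables; accumulating log-probabilities would be safer).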
if fit_intercept:
k = len(feature_cols) + 1
else:
k = len(feature_cols)
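# AIC = 2k - 2*ln(L) and BIC = ln(n)*k - 2*ln(L), where k counts the coefficients
# (plus the intercept when fitted) and L is the likelihood computed above.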
aic = 2 * k - 2 * np.log(likelihood)
bic = np.log(len(table)) * k - 2 * np.log(likelihood)
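# Standard errors from the observed information matrix: Cov(beta) ~ (X^T W X)^-1 with
# W = diag(p*(1-p)); in the binary case the Wald statistic (beta/SE)^2 is tested against chi2(1).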
if is_binary:
if fit_intercept:
x_design = np.hstack([np.ones((features.shape[0], 1)), features])
else:
x_design = features.values
v = np.product(prob, axis=1)
x_design_modi = (x_design.T * v).T
cov_logit = np.linalg.inv(np.dot(x_design_modi.T, x_design))
std_err = np.sqrt(np.diag(cov_logit))
if fit_intercept:
logit_params = np.insert(coefficients, 0, intercept)
else:
logit_params = coefficients[0]
wald = (logit_params / std_err) ** 2
p_values = 1 - chi2.cdf(wald, 1)
else:
if fit_intercept:
x_design = np.hstack([np.ones((features.shape[0], 1)), features])
else:
x_design = features.values
std_err = []
for i in range(len(classes)):
v = prob.T[i] * (1 - prob.T[i])
x_design_modi = (x_design.T * v).T
cov_logit = np.linalg.inv(np.dot(x_design_modi.T, x_design))
std_err.append(np.sqrt(np.diag(cov_logit)))
std_err = np.array(std_err)
# print(math.log(likelihood))
if (fit_intercept == True):
summary = pd.DataFrame({'features': ['intercept'] + feature_names})
coef_trans = np.concatenate(([intercept], np.transpose(coefficients)), axis=0)
else:
summary = pd.DataFrame({'features': feature_names})
coef_trans = np.transpose(coefficients)
if not is_binary:
summary = pd.concat((summary, pd.DataFrame(coef_trans, columns=classes)), axis=1)
else:
summary = pd.concat((summary, pd.DataFrame(coef_trans, columns=[classes[0]])), axis=1)
if is_binary:
summary = pd.concat((summary, pd.DataFrame(std_err, columns=['standard_error']), pd.DataFrame(wald, columns=['wald_statistic']), pd.DataFrame(p_values, columns=['p_value'])), axis=1)
else:
columns = ['standard_error_{}'.format(classes[i]) for i in range(len(classes))]
summary = pd.concat((summary, pd.DataFrame(std_err.T, columns=columns)), axis=1)
arrange_col = ['features']
for i in range(len(classes)):
arrange_col.append(classes[i])
arrange_col.append('standard_error_{}'.format(classes[i]))
summary = summary[arrange_col]
if is_binary:
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Logistic Regression Result
| ### Summary
| {table1}
|
| ##### Column '{small}' is the coefficients under the assumption ({small} = 0, {big} = 1).
|
| #### AIC : {aic}
|
| #### BIC : {bic}
""".format(small=classes[0], big=classes[1], table1=pandasDF2MD(summary, num_rows=100), aic=aic, bic=bic
)))
else:
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Logistic Regression Result
| ### Summary
| {table1}
|
| ##### Each column whose name is one of classes of Label Column is the coefficients under the assumption it is 1 and others are 0.
|
| ##### For example, column '{small}' is the coefficients under the assumption ({small} = 1, others = 0).
|
| #### AIC : {aic}
|
| #### BIC : {bic}
""".format(small=classes[0], table1=pandasDF2MD(summary, num_rows=100), aic=aic, bic=bic
)))
model = _model_dict('logistic_regression_model')
model['standard_errors'] = std_err
model['aic'] = aic
model['bic'] = bic
if is_binary:
model['wald_statistics'] = wald
model['p_values'] = p_values
model['features'] = feature_cols
model['label'] = label_col
model['intercept'] = lr_model.intercept_
model['coefficients'] = lr_model.coef_
model['class'] = lr_model.classes_
model['penalty'] = penalty
model['solver'] = solver
model['lr_model'] = lr_model
model['_repr_brtc_'] = rb.get()
model['summary'] = summary
return {'model' : model}
def logistic_regression_predict(table, model, **params):
check_required_parameters(_logistic_regression_predict, params, ['table', 'model'])
if '_grouped_data' in model:
return _function_by_group(_logistic_regression_predict, table, model, **params)
else:
return _logistic_regression_predict(table, model, **params)
def _logistic_regression_predict(table, model, prediction_col='prediction', prob_prefix='probability',
output_log_prob=False, log_prob_prefix='log_probability', thresholds=None,
suffix='index'):
if (table.shape[0] == 0):
new_cols = table.columns.tolist() + [prediction_col]
classes = model['lr_model'].classes_
if suffix == 'index':
prob_cols = [prob_prefix + '_{}'.format(i) for i in range(len(classes))]
else:
prob_cols = [prob_prefix + '_{}'.format(i) for i in classes]
if output_log_prob:
if suffix == 'index':
log_cols = [log_prob_prefix + '_{}'.format(i) for i in range(len(classes))]
else:
log_cols = [log_prob_prefix + '_{}'.format(i) for i in classes]
else:
log_cols = []
new_cols += prob_cols + log_cols
out_table = pd.DataFrame(columns=new_cols)
return {'out_table': out_table}
if 'features' in model:
feature_cols = model['features']
else:
feature_cols = model['feature_cols']
if 'lr_model' in model:
feature_names, features = check_col_type(table, feature_cols)
features = pd.DataFrame(features, columns=feature_names)
else:
features = table[feature_cols]
if 'auto' in model and 'vs' not in model['_type']:
if model['auto']:
one_hot_input = model['table_4'][:-1][model['table_4']['data_type'][:-1] == 'string'].index
if len(one_hot_input != 0):
features = one_hot_encoder(prefix='col_name', table=features, input_cols=features.columns[one_hot_input].tolist(), suffix='label')['out_table']
features = features[model['table_2']['features']]
else:
one_hot_input = model['table_3'][:-1][model['table_3']['data_type'][:-1] == 'string'].index
if len(one_hot_input != 0):
features = one_hot_encoder(prefix='col_name', table=features, input_cols=features.columns[one_hot_input].tolist(), suffix='label')['out_table']
features = features[model['table_1']['features']]
elif 'auto' in model and 'vs' in model['_type']:
if model['auto']:
one_hot_input = model['table_3'][:-1][model['table_3']['data_type'][:-1] == 'string'].index
if len(one_hot_input != 0):
features = one_hot_encoder(prefix='col_name', table=features, input_cols=features.columns[one_hot_input].tolist(), suffix='label')['out_table']
features = features[model['table_2']['features']]
else:
one_hot_input = model['table_2'][:-1][model['table_2']['data_type'][:-1] == 'string'].index
if len(one_hot_input != 0):
features = one_hot_encoder(prefix='col_name', table=features, input_cols=features.columns[one_hot_input].tolist(), suffix='label')['out_table']
features = features[model['table_1']['features']]
if 'lr_model' in model:
lr_model = model['lr_model']
classes = lr_model.classes_
len_classes = len(classes)
is_binary = len_classes == 2
else:
fit_intercept = model['fit_intercept']
if 'vs' not in model['_type']:
len_classes = 2
is_binary = True
if 'auto' in model:
if model['auto']:
classes = model['table_4']['labels'].values[-1]
classes_type = model['table_4']['data_type'].values[-1]
if classes_type == 'integer' or classes_type == 'long':
classes = np.array([int(i) for i in classes])
elif classes_type == 'float' or classes_type == 'double':
classes = np.array([float(i) for i in classes])
coefficients = model['table_3']['coefficients'][0]
intercept = model['table_3']['intercept'][0]
else:
classes = model['table_3']['labels'].values[-1]
classes_type = model['table_3']['data_type'].values[-1]
if classes_type == 'integer' or classes_type == 'long':
classes = np.array([int(i) for i in classes])
elif classes_type == 'float' or classes_type == 'double':
classes = np.array([float(i) for i in classes])
coefficients = model['table_2']['coefficients'][0]
intercept = model['table_2']['intercept'][0]
else:
classes = np.array([0, 1])
coefficients = model['table_2']['coefficient'][1:]
if fit_intercept:
intercept = model['table_2']['coefficient'][0]
else:
if 'auto' in model:
if model['auto']:
classes = np.array(model['table_3']['labels'].values[-1])
len_classes = len(classes)
is_binary = len_classes == 2
intercept = model['table_2'].intercept
coefficients = model['table_2'].coefficients
else:
classes = np.array(model['table_2']['labels'].values[-1])
len_classes = len(classes)
is_binary = len_classes == 2
intercept = model['table_1'].intercept
coefficients = model['table_1'].coefficients
else:
classes = np.array(model['table_1'].labelInfo)
len_classes = len(classes)
is_binary = len_classes == 2
intercept = model['table_1'].intercept
coefficients = (model['table_1'][[i for i in model['table_1'].columns if 'coefficient' in i]]).values
if thresholds is None:
thresholds = np.array([1 / len_classes for _ in classes])
elif isinstance(thresholds, list):
if len(thresholds) == 1 and is_binary and 0 < thresholds[0] < 1:
thresholds = np.array([thresholds[0], 1 - thresholds[0]])
else:
thresholds = np.array(thresholds)
len_thresholds = len(thresholds)
if len_classes > 0 and len_thresholds > 0 and len_classes != len_thresholds:
# FN-0613='%s' must have length equal to the number of classes.
raise_error('0613', ['thresholds'])
if 'lr_model' in model:
prob = lr_model.predict_proba(features)
else:
features = features.values
coefficients = np.array(coefficients)
if is_binary:
tmp = features * coefficients
if fit_intercept or 'auto' in model:
prob = 1 / (np.exp(np.sum(tmp, axis=1) + intercept) + 1)
else:
prob = 1 / (np.exp(np.sum(tmp, axis=1)) + 1)
prob = np.array([[x, 1 - x] for x in prob])
else:
prob = []
for i in range(len(coefficients)):
tmp = features * coefficients[i]
if fit_intercept:
prob.append(1 / (np.exp(-np.sum(tmp, axis=1) - intercept[i]) + 1))
else:
prob.append(1 / (np.exp(-np.sum(tmp, axis=1)) + 1))
prob = np.array(prob).T
prob = np.apply_along_axis(lambda x: x / np.sum(x), 1 , prob)
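# Dividing class probabilities by the per-class thresholds before argmax gives
# threshold-adjusted predictions; with uniform thresholds this reduces to a plain argmax.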
prediction = classes[np.argmax(prob / thresholds, axis=1)]
out_table = table.copy()
out_table[prediction_col] = prediction
if suffix == 'index':
suffixes = [i for i, _ in enumerate(classes)]
else:
suffixes = classes
prob_cols = ['{probability_col}_{suffix}'.format(probability_col=prob_prefix, suffix=suffix) for suffix in suffixes]
prob_df = pd.DataFrame(data=prob, columns=prob_cols)
if output_log_prob:
log_prob = np.log(prob)
logprob_cols = ['{log_probability_col}_{suffix}'.format(log_probability_col=log_prob_prefix, suffix=suffix) for suffix in suffixes]
logprob_df = pd.DataFrame(data=log_prob, columns=logprob_cols)
import pytest
import numpy as np
import pandas as pd
from delphi_jhu.geo import geo_map, add_county_pop, INCIDENCE_BASE
from delphi_utils import GeoMapper
class TestGeoMap:
def test_incorrect_geo(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
with pytest.raises(ValueError):
geo_map(df, "département", "cumulative_prop")
def test_fips(self, jhu_confirmed_test_data):
test_df = jhu_confirmed_test_data
fips_df = geo_map(test_df, "county", "cumulative_prop")
test_df = fips_df.loc[(fips_df.geo_id == "01001") & (fips_df.timestamp == "2020-09-15")]
gmpr = GeoMapper()
fips_pop = gmpr.get_crosswalk("fips", "pop")
pop01001 = float(fips_pop.loc[fips_pop.fips == "01001", "pop"])
expected_df = pd.DataFrame({
"geo_id": "01001",
"timestamp": pd.Timestamp("2020-09-15"),
"cumulative_counts": 1463.0,
"new_counts": 1463.0,
"population": pop01001,
"incidence": 1463 / pop01001 * INCIDENCE_BASE,
"cumulative_prop": 1463 / pop01001 * INCIDENCE_BASE
}, index=[36])
pd.testing.assert_frame_equal(test_df, expected_df)
# Make sure the prop signals don't have inf values
assert not fips_df["incidence"].eq(np.inf).any()
assert not fips_df["cumulative_prop"].eq(np.inf).any()
# make sure no megafips reported
assert not any(i[0].endswith("000") for i in fips_df.geo_id)
def test_state_hhs_nation(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
state_df = geo_map(df, "state", "cumulative_prop")
test_df = state_df.loc[(state_df.geo_id == "al") & (state_df.timestamp == "2020-09-15")]
gmpr = GeoMapper()
state_pop = gmpr.get_crosswalk("state_id", "pop")
al_pop = float(state_pop.loc[state_pop.state_id == "al", "pop"])
expected_df = pd.DataFrame({
"timestamp": pd.Timestamp("2020-09-15"),
"geo_id": "al",
"cumulative_counts": 140160.0,
"new_counts": 140160.0,
"population": al_pop,
"incidence": 140160 / al_pop * INCIDENCE_BASE,
"cumulative_prop": 140160 / al_pop * INCIDENCE_BASE
}, index=[1])
pd.testing.assert_frame_equal(test_df, expected_df)
test_df = state_df.loc[(state_df.geo_id == "gu") & (state_df.timestamp == "2020-09-15")]
gu_pop = float(state_pop.loc[state_pop.state_id == "gu", "pop"])
expected_df = pd.DataFrame({
"timestamp": pd.Timestamp("2020-09-15"),
"geo_id": "gu",
"cumulative_counts": 502.0,
"new_counts": 16.0,
"population": gu_pop,
"incidence": 16 / gu_pop * INCIDENCE_BASE,
"cumulative_prop": 502 / gu_pop * INCIDENCE_BASE
}, index=[11])
pd.testing.assert_frame_equal(test_df, expected_df)
# Make sure the prop signals don't have inf values
assert not state_df["incidence"].eq(np.inf).any()
assert not state_df["cumulative_prop"].eq(np.inf).any()
hhs_df = geo_map(df, "hhs", "cumulative_prop")
test_df = hhs_df.loc[(hhs_df.geo_id == "1") & (hhs_df.timestamp == "2020-09-15")]
hhs_pop = gmpr.get_crosswalk("hhs", "pop")
pop1 = float(hhs_pop.loc[hhs_pop.hhs == "1", "pop"])
expected_df = pd.DataFrame({
"timestamp": pd.Timestamp("2020-09-15"),
"geo_id": "1",
"cumulative_counts": 218044.0,
"new_counts": 218044.0,
"population": pop1,
"incidence": 218044 / pop1 * INCIDENCE_BASE,
"cumulative_prop": 218044 / pop1 * INCIDENCE_BASE
}, index=[0])
pd.testing.assert_frame_equal(test_df, expected_df)
# Make sure the prop signals don't have inf values
assert not hhs_df["incidence"].eq(np.inf).any()
assert not hhs_df["cumulative_prop"].eq(np.inf).any()
nation_df = geo_map(df, "nation", "cumulative_prop")
test_df = nation_df.loc[(nation_df.geo_id == "us") & (nation_df.timestamp == "2020-09-15")]
fips_pop = gmpr.replace_geocode(add_county_pop(df, gmpr), "fips", "nation")
nation_pop = float(fips_pop.loc[(fips_pop.nation == "us") & (fips_pop.timestamp == "2020-09-15"), "population"])
expected_df = pd.DataFrame({
"timestamp": pd.Timestamp("2020-09-15"),
"geo_id": "us",
"cumulative_counts": 6589234.0,
"new_counts": 6588748.0,
"population": nation_pop,
"incidence": 6588748 / nation_pop * INCIDENCE_BASE,
"cumulative_prop": 6589234 / nation_pop * INCIDENCE_BASE
}, index=[0])
pd.testing.assert_frame_equal(test_df, expected_df)
# Make sure the prop signals don't have inf values
assert not nation_df["incidence"].eq(np.inf).any()
assert not nation_df["cumulative_prop"].eq(np.inf).any()
def test_msa_hrr(self, jhu_confirmed_test_data):
for geo in ["msa", "hrr"]:
test_df = jhu_confirmed_test_data
new_df = geo_map(test_df, geo, "cumulative_prop")
gmpr = GeoMapper()
if geo == "msa":
test_df = add_county_pop(test_df, gmpr)
test_df = gmpr.replace_geocode(test_df, "fips", geo)
if geo == "hrr":
test_df = add_county_pop(test_df, gmpr)
test_df = test_df[~test_df["fips"].str.endswith("000")]
test_df = gmpr.replace_geocode(test_df, "fips", geo)
new_df = new_df.set_index(["geo_id", "timestamp"]).sort_index()
test_df = test_df.set_index([geo, "timestamp"]).sort_index()
# Check that the non-proportional columns are identical
assert new_df.eq(test_df)[["new_counts", "population", "cumulative_counts"]].all().all()
# Check that the proportional signals are identical
exp_incidence = test_df["new_counts"] / test_df["population"] * INCIDENCE_BASE
expected_cumulative_prop = test_df["cumulative_counts"] / test_df["population"] *\
INCIDENCE_BASE
assert new_df["incidence"].eq(exp_incidence).all()
assert new_df["cumulative_prop"].eq(expected_cumulative_prop).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any()
def test_add_county_pop(self):
gmpr = GeoMapper()
        test_df = pd.DataFrame({"fips": ["01001", "06000", "06097", "72000", "72153", "78000"]})
""" pydatastream main module
(c) <NAME>, 2013 - 2021
"""
import warnings
import json
import math
from functools import wraps
import requests
import pandas as pd
###############################################################################
_URL = 'https://product.datastream.com/dswsclient/V1/DSService.svc/rest/'
_FLDS_XREF = ('DSCD,EXMNEM,GEOGC,GEOGN,IBTKR,INDC,INDG,INDM,INDX,INDXEG,'
'INDXFS,INDXL,INDXS,ISIN,ISINID,LOC,MNEM,NAME,SECD,TYPE'.split(','))
_FLDS_XREF_FUT = ('MNEM,NAME,FLOT,FEX,GEOGC,GEOGN,EXCODE,LTDT,FUTBDATE,PCUR,ISOCUR,'
'TICKS,TICKV,TCYCLE,TPLAT'.split(','))
_ASSET_TYPE_CODES = {'BD': 'Bonds & Convertibles',
'BDIND': 'Bond Indices & Credit Default Swaps',
'CMD': 'Commodities',
'EC': 'Economics',
'EQ': 'Equities',
'EQIND': 'Equity Indices',
'EX': 'Exchange Rates',
'FT': 'Futures',
'INT': 'Interest Rates',
'INVT': 'Investment Trusts',
'OP': 'Options',
'UT': 'Unit Trusts',
'EWT': 'Warrants',
'NA': 'Not available'}
###############################################################################
_INFO = """PyDatastream documentation (GitHub):
https://github.com/vfilimonov/pydatastream
Datastream Navigator:
http://product.datastream.com/navigator/
Official support
https://customers.reuters.com/sc/Contactus/simple?product=Datastream&env=PU&TP=Y
Webpage for testing REST API requests
http://product.datastream.com/dswsclient/Docs/TestRestV1.aspx
Documentation for DSWS API
http://product.datastream.com/dswsclient/Docs/Default.aspx
Datastream Web Service Developer community
https://developers.refinitiv.com/eikon-apis/datastream-web-service
"""
###############################################################################
###############################################################################
def _convert_date(date):
""" Convert date to YYYY-MM-DD """
if date is None:
return ''
if isinstance(date, str) and (date.upper() == 'BDATE'):
return 'BDATE'
return pd.Timestamp(date).strftime('%Y-%m-%d')
def _parse_dates(dates):
""" Parse dates
Example:
/Date(1565817068486) -> 2019-08-14T21:11:08.486000000
/Date(1565568000000+0000) -> 2019-08-12T00:00:00.000000000
"""
if dates is None:
return None
if isinstance(dates, str):
return pd.Timestamp(_parse_dates([dates])[0])
res = [int(_[6:(-7 if '+' in _ else -2)]) for _ in dates]
return pd.to_datetime(res, unit='ms').values
class DatastreamException(Exception):
""" Exception class for Datastream """
###############################################################################
def lazy_property(fn):
""" Lazy-evaluated property of an object """
attr_name = '__lazy__' + fn.__name__
@property
@wraps(fn)
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
###############################################################################
# Main Datastream class
###############################################################################
class Datastream():
""" Python interface to the Refinitiv Datastream API via Datastream Web
Services (DSWS).
"""
def __init__(self, username, password, raise_on_error=True, proxy=None, **kwargs):
"""Establish a connection to the Python interface to the Refinitiv Datastream
(former Thomson Reuters Datastream) API via Datastream Web Services (DSWS).
username / password - credentials for the DSWS account.
raise_on_error - If True then error request will raise a "DatastreamException",
otherwise either empty dataframe or partially
retrieved data will be returned
proxy - URL for the proxy server. Valid values:
(a) None: no proxy is used
(b) string of format "host:port" or "username:password@host:port"
Note: credentials will be saved in memory. In case if this is not
desirable for security reasons, call the constructor having None
instead of values and manually call renew_token(username, password)
when needed.
A custom REST API url (if necessary for some reasons) could be provided
via "url" parameter.
"""
self.raise_on_error = raise_on_error
self.last_request = None
self.last_metadata = None
self._last_response_raw = None
# Setting up proxy parameters if necessary
if isinstance(proxy, str):
self._proxy = {'http': proxy, 'https': proxy}
elif proxy is None:
self._proxy = None
else:
raise ValueError('Proxy parameter should be either None or string')
self._url = kwargs.pop('url', _URL)
self._username = username
self._password = password
# request new token
self.renew_token(username, password)
###########################################################################
@staticmethod
def info():
""" Some useful links """
print(_INFO)
###########################################################################
def _api_post(self, method, request):
""" Call to the POST method of DSWS API """
url = self._url + method
self.last_request = {'url': url, 'request': request, 'error': None}
self.last_metadata = None
try:
res = requests.post(url, json=request, proxies=self._proxy)
self.last_request['response'] = res.text
except Exception as e:
self.last_request['error'] = str(e)
raise
try:
response = self.last_request['response'] = json.loads(self.last_request['response'])
except json.JSONDecodeError as e:
raise DatastreamException('Server response could not be parsed') from e
if 'Code' in response:
code = response['Code']
if response['SubCode'] is not None:
code += '/' + response['SubCode']
errormsg = f'{code}: {response["Message"]}'
self.last_request['error'] = errormsg
raise DatastreamException(errormsg)
return self.last_request['response']
###########################################################################
def renew_token(self, username=None, password=None):
""" Request new token from the server """
if username is None or password is None:
warnings.warn('Username or password is not provided - could not renew token')
return
data = {"UserName": username, "Password": password}
self._token = dict(self._api_post('GetToken', data))
self._token['TokenExpiry'] = _parse_dates(self._token['TokenExpiry']).tz_localize('UTC')
# Token is invalidated 15 minutes before exporation time
# Note: According to https://github.com/vfilimonov/pydatastream/issues/27
# tokens do not always respect the (as of now 24 hours) expiry time
# So for this reason I limit the token life at 6 hours.
self._token['RenewTokenAt'] = min(self._token['TokenExpiry'] - pd.Timedelta('15m'),
pd.Timestamp.utcnow() + pd.Timedelta('6H'))
@property
def _token_is_expired(self):
if self._token is None:
return True
        if pd.Timestamp.utcnow() > self._token['RenewTokenAt']:
            return True
        return False
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
import hashlib
from typing import ContextManager
import srt
import pandas
import functools
from pydub import AudioSegment
from datetime import datetime, timedelta
from pathlib import Path
from praatio import tgio
from .clean_transcript import clean_transcript
ALPHABET_FILE_PATH = "/DeepSpeech/bin/bangor_welsh/alphabet.txt"
def get_directory_structure(rootdir):
dir = {}
rootdir = rootdir.rstrip(os.sep)
start = rootdir.rfind(os.sep) + 1
for path, dirs, files in os.walk(rootdir, followlinks=True):
folders = path[start:].split(os.sep)
subdir = dict.fromkeys(files)
parent = functools.reduce(dict.get, folders[:-1], dir)
parent[folders[-1]] = subdir
return dir
def import_textgrid(target_csv_file, textfile):
print ("Importing clips and transcripts from %s " % textfile)
target_data_root_dir = Path(target_csv_file).parent
target_clips_dir = os.path.join(target_data_root_dir, "clips")
Path(target_clips_dir).mkdir(parents=True, exist_ok=True)
    df = pandas.DataFrame(columns=['wav_filename', 'wav_filesize', 'transcript'])
import numpy as np
import pandas as pd
import astropy.io.fits as fits
import os
def create_folders(path_list):
for item_path in path_list:
if not os.path.exists(item_path):
os.mkdir(item_path)
def match_simu_detect(simulated_outcat_path, detected_outcat_path, match_save_path):
# simulated_outcat_path, detected_outcat_path, match_save_path = outcat_name, fit_outcat_name, match_save_path
if not os.path.exists(match_save_path):
os.mkdir(match_save_path)
clump_item = simulated_outcat_path.split('_')[-1].split('.')[0]
Match_table = os.path.join(match_save_path, 'Match_table')
Miss_table = os.path.join(match_save_path, 'Miss_table')
False_table = os.path.join(match_save_path, 'False_table')
create_folders([Match_table, Miss_table, False_table])
Match_table_name = os.path.join(Match_table, 'Match_%s.txt' %clump_item)
Miss_table_name = os.path.join(Miss_table, 'Miss_%s.txt' % clump_item)
False_table_name = os.path.join(False_table, 'False_%s.txt' % clump_item)
table_s = pd.read_csv(simulated_outcat_path, sep='\t')
table_g = pd.read_csv(detected_outcat_path, sep='\t')
if table_g.values.shape[1] == 1:
table_g = pd.read_csv(detected_outcat_path, sep=' ')
# table_simulate1=pd.read_csv(path_outcat,sep=' ')
# table_g=pd.read_csv(path_outcat_wcs,sep='\t')
# table_g.columns = new_cols
    Error_xyz = np.array([2, 2, 2])  # maximum matching error allowed (unit: pixels)
Cen_simulate = np.vstack([table_s['Cen1'], table_s['Cen2'], table_s['Cen3']]).T
# Cen_simulate = np.vstack([table_s['Peak1'], table_s['Peak2'], table_s['Peak3']]).T
Size_simulate = np.vstack([table_s['Size1'], table_s['Size2'], table_s['Size3']]).T
Cen_gauss = np.vstack([table_g['Cen1'], table_g['Cen2'], table_g['Cen3']]).T
Cen_gauss = Cen_gauss[~np.isnan(Cen_gauss).any(axis=1), :]
# calculate distance
simu_len = Cen_simulate.shape[0]
gauss_len = Cen_gauss.shape[0]
distance = np.zeros([simu_len, gauss_len])
for i, item_simu in enumerate(Cen_simulate):
for j, item_gauss in enumerate(Cen_gauss):
cen_simu = item_simu
cen_gauss = item_gauss
temp = np.sqrt(((cen_gauss - cen_simu)**2).sum())
distance[i,j] = temp
max_d = 1.2 * distance.max()
    match_record_simu_detect = []  # matched clump catalog
    match_num = 0
    while 1:
        # find the row and column with the smallest distance
        d_ij_value = distance.min()
        if d_ij_value == max_d:  # every entry of the distance matrix has been matched
break
[simu_i, gauss_j] = np.where(distance==d_ij_value)
simu_i, gauss_j = simu_i[0], gauss_j[0]
cen_simu_i = Cen_simulate[simu_i]
size_simu_i = Size_simulate[simu_i]
cen_gauss_j = Cen_gauss[gauss_j]
        # determine the error tolerance
temp = np.array([Error_xyz, size_simu_i / 2.3548])
Error_xyz1 = temp.min(axis=0)
d_ij = np.abs(cen_simu_i - cen_gauss_j)
match_num_ = match_num
if (d_ij<= Error_xyz1).all():
# print([d_ij, d_ij_value])
distance[simu_i,:] = np.ones([gauss_len]) * max_d
distance[:, gauss_j] = np.ones([simu_len]) * max_d
match_num = match_num + 1
            match_record_simu_detect.append(np.array([d_ij_value, simu_i + 1, gauss_j + 1]))  # error, simulated-catalog index, detected-catalog index
if match_num == match_num_:
break
match_record_simu_detect = np.array(match_record_simu_detect)
F1, precision, recall = 0, 0, 0
if match_num > 0:
precision = match_num / gauss_len
recall = match_num / simu_len
F1 = 2 * precision * recall / (precision + recall)
# print("simulated num = %d\t detected num %d\t match num %d" % (simu_len, gauss_len, match_num))
print("F1_precision_recall = %.3f, %.3f, %.3f" % (F1, precision, recall))
# new_cols = ['PIDENT', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'theta', 'Peak',
# 'Sum', 'Volume']
if match_record_simu_detect.shape[0] > 0:
new_cols_sium = table_s.keys()
new_cols_detect = table_g.keys()
        names = ['s_' + item for item in new_cols_sium]  # column names
        names1 = ['f_' + item for item in new_cols_detect]  # column names
table_title = names + names1
match_simu_inx = match_record_simu_detect[:, 1].astype(np.int)
table_s_np = table_s.values[match_simu_inx - 1, :]
match_gauss = match_record_simu_detect[:, 2].astype(np.int)
table_g_np = table_g.values[match_gauss - 1, :]
match_outcat = np.hstack([table_s_np, table_g_np])
dataframe = pd.DataFrame(match_outcat, columns=table_title)
# dataframe = dataframe.round({'ID': 0, 'Peak1': 0, 'Peak2': 0, 'Peak3': 0, 'Cen1': 3, 'Cen2': 3, 'Cen3': 3,
# 'Size1': 3, 'Size2': 3, 'Size3': 3, 'Peak': 3, 'Sum': 3, 'Volume': 3})
dataframe.to_csv(Match_table_name, sep='\t', index=False)
# simu_inx = table_s['ID']
simu_inx = np.array([item + 1 for item in range(table_s['ID'].shape[0])])
# x = set([0.0])
        miss_idx = np.setdiff1d(simu_inx, match_simu_inx).astype(np.int)  # IDs of simulated clumps that were not detected
        miss_names = ['s_' + item for item in new_cols_sium]  # column names
if len(miss_idx) == 0:
miss_outcat = []
else:
miss_outcat = table_s.values[miss_idx - 1, :]
dataframe = pd.DataFrame(miss_outcat, columns=miss_names)
# dataframe = dataframe.round({'ID': 0, 'Peak1': 0, 'Peak2': 0, 'Peak3': 0, 'Cen1': 3, 'Cen2': 3, 'Cen3': 3,
# 'Size1': 3, 'Size2': 3, 'Size3': 3, 'Peak': 3, 'Sum': 3, 'Volume': 3})
dataframe.to_csv(Miss_table_name, sep='\t', index=False)
# miss = Table(names=miss_names)
# for item in miss_idx: # 未检出表
# miss.add_row(list(table_s[int(item) - 1, :]))
# miss.write(Miss_table_name, overwrite=True, format='ascii')
try:
# gauss_inx = table_g['ID']
gauss_inx = np.array([item + 1 for item in range(table_g['ID'].shape[0])])
except KeyError:
gauss_inx = table_g['PIDENT']
false_idx = np.setdiff1d(gauss_inx, match_gauss).astype(np.int)
if len(false_idx) == 0:
false_outcat = []
else:
# print(false_idx)
false_outcat = table_g.values[false_idx - 1, :]
        false_names = ['f_' + item for item in new_cols_detect]  # column names
dataframe = pd.DataFrame(false_outcat, columns=false_names)
# dataframe = dataframe.round({'ID': 0, 'Peak1': 0, 'Peak2': 0, 'Peak3': 0, 'Cen1': 3, 'Cen2': 3, 'Cen3': 3,
# 'Size1': 3, 'Size2': 3, 'Size3': 3, 'Peak': 3, 'Sum': 3, 'Volume': 3})
dataframe.to_csv(False_table_name, sep='\t', index=False)
else:
new_cols_sium = table_s.keys()
new_cols_detect = table_g.keys()
        names = ['s_' + item for item in new_cols_sium]  # column names
        names1 = ['f_' + item for item in new_cols_detect]  # column names
table_title = names + names1
match_outcat = []
dataframe = pd.DataFrame(match_outcat, columns=table_title)
dataframe.to_csv(Match_table_name, sep='\t', index=False)
        miss_names = ['s_' + item for item in new_cols_sium]  # column names
miss_outcat = []
dataframe = pd.DataFrame(miss_outcat, columns=miss_names)
dataframe.to_csv(Miss_table_name, sep='\t', index=False)
false_outcat = []
        false_names = ['f_' + item for item in new_cols_detect]  # column names
dataframe = pd.DataFrame(false_outcat, columns=false_names)
dataframe.to_csv(False_table_name, sep='\t', index=False)
def match_simu_detect_2d(simulated_outcat_path, detected_outcat_path, match_save_path):
# simulated_outcat_path, detected_outcat_path, match_save_path = outcat_name, fit_outcat_name, match_save_path
if not os.path.exists(match_save_path):
os.mkdir(match_save_path)
clump_item = simulated_outcat_path.split('_')[-1].split('.')[0]
Match_table = os.path.join(match_save_path, 'Match_table')
Miss_table = os.path.join(match_save_path, 'Miss_table')
False_table = os.path.join(match_save_path, 'False_table')
create_folders([Match_table, Miss_table, False_table])
Match_table_name = os.path.join(Match_table, 'Match_%s.txt' % clump_item)
Miss_table_name = os.path.join(Miss_table, 'Miss_%s.txt' % clump_item)
False_table_name = os.path.join(False_table, 'False_%s.txt' % clump_item)
# table_simulate1 = np.loadtxt(simulated_outcat_path, skiprows=1)
# table_g = np.loadtxt(detected_outcat_path, skiprows=1)
table_s = pd.read_csv(simulated_outcat_path, sep='\t')
table_g = pd.read_csv(detected_outcat_path, sep='\t')
if table_g.values.shape[1] == 1:
table_g = pd.read_csv(detected_outcat_path, sep=' ')
# table_simulate1=pd.read_csv(path_outcat,sep=' ')
# table_g=pd.read_csv(path_outcat_wcs,sep='\t')
# table_g.columns = new_cols
    Error_xyz = np.array([2, 2])  # maximum matching error allowed (unit: pixels)
Cen_simulate = np.vstack([table_s['Cen1'], table_s['Cen2']]).T
Size_simulate = np.vstack([table_s['Size1'], table_s['Size2']]).T
try:
Cen_gauss = np.vstack([table_g['Cen1'], table_g['Cen2']]).T
except KeyError:
Cen_gauss = np.vstack([table_g['cen1'], table_g['cen2']]).T
Cen_gauss = Cen_gauss[~np.isnan(Cen_gauss).any(axis=1), :]
# calculate distance
simu_len = Cen_simulate.shape[0]
gauss_len = Cen_gauss.shape[0]
distance = np.zeros([simu_len, gauss_len])
for i, item_simu in enumerate(Cen_simulate):
for j, item_gauss in enumerate(Cen_gauss):
cen_simu = item_simu
cen_gauss = item_gauss
temp = np.sqrt(((cen_gauss - cen_simu) ** 2).sum())
distance[i, j] = temp
max_d = 1.2 * distance.max()
    match_record_simu_detect = []  # matched clump catalog
    match_num = 0
    while 1:
        # find the row and column with the smallest distance
        d_ij_value = distance.min()
        if d_ij_value == max_d:  # every entry of the distance matrix has been matched
break
[simu_i, gauss_j] = np.where(distance == d_ij_value)
simu_i, gauss_j = simu_i[0], gauss_j[0]
cen_simu_i = Cen_simulate[simu_i]
size_simu_i = Size_simulate[simu_i]
cen_gauss_j = Cen_gauss[gauss_j]
        # determine the error tolerance
temp = np.array([Error_xyz, size_simu_i / 2.3548])
Error_xyz1 = temp.min(axis=0)
d_ij = np.abs(cen_simu_i - cen_gauss_j)
match_num_ = match_num
if (d_ij <= Error_xyz1).all():
# print([d_ij, d_ij_value])
distance[simu_i, :] = np.ones([gauss_len]) * max_d
distance[:, gauss_j] = np.ones([simu_len]) * max_d
match_num = match_num + 1
            match_record_simu_detect.append(np.array([d_ij_value, simu_i + 1, gauss_j + 1]))  # error, simulated-catalog index, detected-catalog index
if match_num == match_num_:
break
match_record_simu_detect = np.array(match_record_simu_detect)
F1, precision, recall = 0, 0, 0
if match_num > 0:
precision = match_num / gauss_len
recall = match_num / simu_len
F1 = 2 * precision * recall / (precision + recall)
# print("simulated num = %d\t detected num %d\t match num %d" % (simu_len, gauss_len, match_num))
print("F1_precision_recall = %.3f, %.3f, %.3f" % (F1, precision, recall))
# new_cols = ['PIDENT', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'theta', 'Peak',
# 'Sum', 'Volume']
if match_record_simu_detect.shape[0] > 0:
new_cols_sium = table_s.keys()
new_cols_detect = table_g.keys()
        names = ['s_' + item for item in new_cols_sium]  # column names
        names1 = ['f_' + item for item in new_cols_detect]  # column names
table_title = names + names1
match_simu_inx = match_record_simu_detect[:, 1].astype(np.int)
table_s_np = table_s.values[match_simu_inx - 1, :]
match_gauss = match_record_simu_detect[:, 2].astype(np.int)
table_g_np = table_g.values[match_gauss - 1, :]
match_outcat = np.hstack([table_s_np, table_g_np])
dataframe = pd.DataFrame(match_outcat, columns=table_title)
# dataframe = dataframe.round({'ID': 0, 'Peak1': 0, 'Peak2': 0, 'Peak3': 0, 'Cen1': 3, 'Cen2': 3, 'Cen3': 3,
# 'Size1': 3, 'Size2': 3, 'Size3': 3, 'Peak': 3, 'Sum': 3, 'Volume': 3})
dataframe.to_csv(Match_table_name, sep='\t', index=False)
simu_inx = table_s['ID']
# x = set([0.0])
        miss_idx = np.setdiff1d(simu_inx, match_simu_inx).astype(np.int)  # IDs of simulated clumps that were not detected
        miss_names = ['s_' + item for item in new_cols_sium]  # column names
if len(miss_idx) == 0:
miss_outcat = []
else:
miss_outcat = table_s.values[miss_idx - 1, :]
dataframe = pd.DataFrame(miss_outcat, columns=miss_names)
# dataframe = dataframe.round({'ID': 0, 'Peak1': 0, 'Peak2': 0, 'Peak3': 0, 'Cen1': 3, 'Cen2': 3, 'Cen3': 3,
# 'Size1': 3, 'Size2': 3, 'Size3': 3, 'Peak': 3, 'Sum': 3, 'Volume': 3})
dataframe.to_csv(Miss_table_name, sep='\t', index=False)
# miss = Table(names=miss_names)
        # for item in miss_idx: # table of missed clumps
# miss.add_row(list(table_s[int(item) - 1, :]))
# miss.write(Miss_table_name, overwrite=True, format='ascii')
try:
gauss_inx = table_g['ID']
except KeyError:
gauss_inx = table_g['PIDENT']
false_idx = np.setdiff1d(gauss_inx, match_gauss).astype(np.int)
if len(false_idx) == 0:
false_outcat = []
else:
# print(false_idx)
false_outcat = table_g.values[false_idx - 1, :]
        false_names = ['f_' + item for item in new_cols_detect]  # column names
        dataframe = pd.DataFrame(false_outcat, columns=false_names)
        dataframe.to_csv(False_table_name, sep='\t', index=False)
import os
import math
import time
import numpy as np
import pandas as pd
def find_epoch(df, epochLabel, isFlagged, numEpochs, epochStartTime, epochEndTime):
'''
Creates dictionary of each epoch. Values are
dataframes...
'''
epochs = {}
i = 1
while i <= numEpochs: # number of epochs
if(isFlagged): # Ethovision has a bucket for this epoch type
num = str(i)
label = epochLabel + ' ' + num
if(epochStartTime!=0): # if we're looking for a timeframe outside the bucket
                epochstart = df[df[label] == 1].iloc[0]  # first flagged row of this epoch, used for its start time
                starttime = math.floor(epochstart['Recording time'] - epochStartTime)  # Epoch start
                endtime = math.floor(epochstart['Recording time'] + epochEndTime)  # Epoch end
itime = df.index.get_loc(starttime,method='bfill') #Index for epoch start
etime = df.index.get_loc(endtime,method='bfill') #Index for epoch end
epoch = df.iloc[itime:etime] #dataframe for epoch
else: # Epoch spans, and is fully contained within, the bucket
                epoch = pd.DataFrame(df[df[label] == 1])
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # tz mismatch for tz-aware data raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
        dates = [datetime(2013, 10, 7)]
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool STATES
# To explore different scenarios for future installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
# <li> ### Reading a standard ReEDS output file and saving it in a PV ICE input format </li>
# <li>### Reading scenarios of interest and running PV ICE tool </li>
# <li>###Plotting </li>
# <li>### GeoPlotting.</li>
# </ol>
# Notes:
#
# Scenarios of Interest:
# the Ref.Mod,
# o 95-by-35.Adv, and
# o 95-by-35+Elec.Adv+DR ones
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
from IPython.display import display
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'SF_States')
statedatafolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'STATEs')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[4]:
r"""
reedsFile = str(Path().resolve().parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v2a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="UPV Capacity (GW)")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
"""
# ### Reading GIS inputs
# In[5]:
r"""
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
GIS.head()
GIS.loc['p1'].long
"""
# ### Create Scenarios in PV_ICE
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[6]:
SFscenarios = ['Reference.Mod', '95-by-35.Adv', '95-by-35_Elec.Adv_DR']
SFscenarios
# In[7]:
STATEs = ['WA', 'CA', 'VA', 'FL', 'MI', 'IN', 'KY', 'OH', 'PA', 'WV', 'NV', 'MD',
'DE', 'NJ', 'NY', 'VT', 'NH', 'MA', 'CT', 'RI', 'ME', 'ID', 'MT', 'WY', 'UT', 'AZ', 'NM',
'SD', 'CO', 'ND', 'NE', 'MN', 'IA', 'WI', 'TX', 'OK', 'OR', 'KS', 'MO', 'AR', 'LA', 'IL', 'MS',
'AL', 'TN', 'GA', 'SC', 'NC']
# ### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[8]:
MATERIALS = ['glass', 'silicon', 'silver','copper','aluminium','backsheet','encapsulant']
# In[9]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r1.createScenario(name=STATEs[jj], file=filetitle)
r1.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r2.createScenario(name=STATEs[jj], file=filetitle)
r2.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r3.createScenario(name=STATEs[jj], file=filetitle)
r3.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
# # Calculate Mass Flow
# In[10]:
r1.scenMod_noCircularity()
r2.scenMod_noCircularity()
r3.scenMod_noCircularity()
IRENA= False
PERFECTMFG = False
ELorRL = 'RL'
if IRENA:
r1.scenMod_IRENIFY(ELorRL=ELorRL)
r2.scenMod_IRENIFY(ELorRL=ELorRL)
r3.scenMod_IRENIFY(ELorRL=ELorRL)
if PERFECTMFG:
r1.scenMod_PerfectManufacturing()
r2.scenMod_PerfectManufacturing()
r3.scenMod_PerfectManufacturing()
# In[11]:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
# In[12]:
print("STATEs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[STATEs[jj]].data.keys())
print("Material Keys: ", r1.scenario[STATEs[jj]].material['glass'].materialdata.keys())
# # OPEN EI
# In[13]:
kk=0
SFScenarios = [r1, r2, r3]
SFScenarios[kk].name
# In[14]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI.csv', index=False)
print("Done")
# In[15]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Yearly Only.csv', index=False)
print("Done")
# In[16]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Cumulatives Only.csv', index=False)
print("Done")
# In[ ]:
# WORK ON THIS FOR OPENEI
# SCENARIO DIFFERENCES
keyw=['new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['NewInstalledCapacity','InstalledCapacity']
sfprint = ['Reference','Grid Decarbonization', 'High Electrification']
keywunits = ['MW','MW']
keywdcumneed = [True,False]
keywdlevel = ['module','module']
keywscale = [1,1e6]
materials = []
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
# kk -- scenario
for kk in range(0, 3):
sentit = '@value|'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
# foo['@value|scenario|Solar Futures'] = SFScenarios[kk].name
foo['@states'] = STATEs[zz]
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI ScenarioDifferences.csv', index=False)
print("Done")
# In[ ]:
scenariolist.head()
# # SAVE DATA FOR BILLY: STATES
# In[ ]:
# for rounding to 3 significant figures
N = 2
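# A small sketch of the significant-figure rounding used repeatedly below; the
# helper name round_sig is hypothetical (not part of PV_ICE) and just mirrors
# the lambda applied to the result dataframes (N = 2 keeps 3 significant figures).
def round_sig(x, N=2):
    return round(x, N - int(np.floor(np.log10(abs(x)))))
# e.g. round_sig(123456.7) -> 123000.0, round_sig(0.0123456) -> 0.0123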
# SFScenarios[kk].scenario[PCAs[zz]].data.year
#
# Index 20 --> 2030
#
# Index 30 --> 2040
#
# Index 40 --> 2050
# In[ ]:
idx2030 = 20
idx2040 = 30
idx2050 = 40
print("index ", idx2030, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2030])
print("index ", idx2040, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2040])
print("index ", idx2050, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2050])
# #### 6 - STATE Cumulative Virgin Needs by 2050
#
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 6 - STATE Cumulative2050 VirginMaterialNeeds_tons.csv')
# #### 7 - STATE Cumulative EoL Only Waste by 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 7 - STATE Cumulative2050 Waste_EOL_tons.csv')
# ##### 8 - STATE Yearly Virgin Needs 2030 2040 2050
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PVICE 8 - STATE Yearly 2030 2040 2050 VirginMaterialNeeds_tons.csv')
# #### 9 - STATE Yearly EoL Waste 2030 2040 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tonnes
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PVICE 9 - STATE Yearly 2030 2040 2050 Waste_EOL_tons.csv')
# # APPENDIX TABLES
#
#
# #### Appendix - Cumulative Virgin Stock
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:20].sum())
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:30].sum())
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:].sum())
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
# Loop over SF Scenarios
for kk in range(0, 3):
filter_col = [col for col in scenariolist if (col.startswith(SFScenarios[kk].name)) ]
scen = scenariolist[filter_col]
    scen.columns = scen.columns.str.lstrip(SFScenarios[kk].name+'_') # strip the leading scenario-name characters (note: lstrip removes a character set from the left, not an exact prefix).
scen = scen.rename_axis('State')
scen = scen.sort_values(by='glass_2050', ascending=False)
scen.sum(axis=0)
reduced = scen.iloc[0:23]
new_row = pd.Series(data=scen.iloc[23::].sum(axis=0), name='OTHER STATES')
new_row_2 = pd.Series(data=scen.sum(axis=0), name='US TOTAL')
reduced = reduced.append(new_row, ignore_index=False)
reduced = reduced.append(new_row_2, ignore_index=False)
reduced = reduced.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
reduced = reduced.applymap(lambda x: int(x))
reduced.to_csv('PV ICE Appendix - '+ SFScenarios[kk].name + ' Cumulative Virgin Stock by State.csv')
# #### Appendix - Yearly Virgin Stock
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
# Loop over SF Scenarios
for kk in range(0, 3):
filter_col = [col for col in scenariolist if (col.startswith(SFScenarios[kk].name)) ]
scen = scenariolist[filter_col]
    scen.columns = scen.columns.str.lstrip(SFScenarios[kk].name+'_') # strip the leading scenario-name characters (note: lstrip removes a character set from the left, not an exact prefix).
scen = scen.rename_axis('State')
scen = scen.sort_values(by='glass_2050', ascending=False)
reduced = scen.iloc[0:23]
new_row = pd.Series(data=scen.iloc[23::].sum(axis=0), name='OTHER STATES')
new_row_2 = pd.Series(data=scen.sum(axis=0), name='US TOTAL')
reduced = reduced.append(new_row, ignore_index=False)
reduced = reduced.append(new_row_2, ignore_index=False)
reduced = reduced.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
reduced = reduced.applymap(lambda x: int(x))
reduced.to_csv('PV ICE Appendix - '+ SFScenarios[kk].name + ' Yearly Virgin Stock by State.csv')
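# Caution on the column renaming above: pandas' str.lstrip() removes any leading
# characters that appear in the given set, not an exact prefix, so column names can
# be clipped further than intended. A hypothetical exact-prefix alternative:
#   prefix = SFScenarios[kk].name + '_'
#   scen.columns = [c[len(prefix):] if c.startswith(prefix) else c for c in scen.columns]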
# #### Appendix - Cumulative EOL Waste by State
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:20].sum())
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:30].sum())
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:].sum())
yearlylist = | pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
import copy
from .databunch import *
from .evaluate import *
def to_numpy(arr):
    """Return a numpy copy of a torch tensor, or the input unchanged if it is not a tensor."""
    try:
        return arr.data.cpu().numpy()
    except: pass
    return arr
class PTArray(np.ndarray):
_metadata = ['_pt_scaler', '_pt_indices', '_train_indices', '_valid_indices', '_test_indices', '_ycolumn', '_columns', '_bias']
def __new__(cls, input_array):
return np.asarray(input_array).view(cls)
def __array_finalize__(self, obj) -> None:
if obj is None: return
d = { a:getattr(obj, a) for a in self._metadata if hasattr(obj, a) }
self.__dict__.update(d)
def __array_function__(self, func, types, *args, **kwargs):
return self._wrap(super().__array_function__(func, types, *args, **kwargs))
def __getitem__(self, item):
r = super().__getitem__(item)
if type(item) == tuple and len(item) == 2 and type(item[0]) == slice:
r._columns = r._columns[item[1]]
if self._check_list_attr('_pt_scaler'):
r._pt_scaler = r._pt_scaler[item[1]]
return r
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
def cast(i):
if type(i) is PTArray:
return i.view(np.ndarray)
return i
inputs = [ cast(i) for i in inputs ]
return self._wrap(super().__array_ufunc__(ufunc, method, *inputs, **kwargs))
def _check_list_attr(self, attr):
try:
return hasattr(self, attr) and len(getattr(self, attr)) > 0
except:
return False
def _pt_scaler_exists(self):
return self._check_list_attr('_pt_scaler')
def _test_indices_exists(self):
return self._check_list_attr('_test_indices')
# def add_bias(self):
# assert not hasattr(self, '_bias'), 'You cannot add a bias twice'
# self._bias = 1
# r = self._wrap(np.concatenate([np.ones((self.shape[0], 1)), self], axis=1))
# r._bias = 1
# r._columns = ['bias'] + r._columns
# if r._pt_scaler_exists():
# r._pt_scaler = [None] + r._pt_scaler
# return r
def ycolumn(self, columns):
r = copy.copy(self)
r._ycolumn = columns
return r
@property
def yscaler(self):
return [ s for s, c in zip(self._pt_scaler, self._columns) if c in self._ycolumn ]
@property
def _ycolumnsnr(self):
return [ i for i, c in enumerate(self._columns) if c in self._ycolumn ]
@property
def _xcolumns(self):
return [ c for c in self._columns if c not in self._ycolumn ]
@property
def _xcolumnsnr(self):
return [ i for i, c in enumerate(self._columns) if c not in self._ycolumn ]
def _train_indices_exists(self):
return self._check_list_attr('_train_indices')
def _valid_indices_exists(self):
return self._check_list_attr('_valid_indices')
def _wrap(self, a):
a = PTArray(a)
a.__dict__.update(self.__dict__)
return a
def polynomials(self, degree):
assert not self._pt_scaler_exists(), "Run polynomials before scaling"
poly = PolynomialFeatures(degree, include_bias=False)
        ny = len(self._ycolumn)
        p = poly.fit_transform(self[:, :-ny])
        return self._wrap(np.concatenate([p, self[:, -ny:]], axis=1))
def to_arrays(self):
if self._test_indices_exists():
return self.train_X, self.valid_X, self.test_X, self.train_y, self.valid_y, self.test_y
elif self._valid_indices_exists():
return self.train_X, self.valid_X, self.train_y, self.valid_y
else:
return self.train_X, self.train_y
def scale(self, scalertype=StandardScaler):
assert self._train_indices_exists(), "Split the DataFrame before scaling!"
assert not self._pt_scaler_exists(), "Trying to scale twice, which is a really bad idea!"
r = self._wrap(copy.deepcopy(self))
r._pt_scaler = tuple(self._create_scaler(scalertype, column) for column in self[self._train_indices].T)
return r.transform(self)
@staticmethod
def _create_scaler(scalertype, column):
scaler = scalertype()
scaler.fit(column.reshape(-1,1))
return scaler
def transform(self, array):
out = []
for column, scaler in zip(array.T, self._pt_scaler):
if scaler is not None:
out.append(scaler.transform(column.reshape(-1,1)))
else:
out.append(column)
return self._wrap(np.concatenate(out, axis=1))
    def inverse_transform_y(self, y):
        y = to_numpy(y)
        y = y.reshape(-1, len(self._ycolumn))
        yscalers = self.yscaler
        out = [ y[:, i].reshape(-1, 1) if yscalers[i] is None else yscalers[i].inverse_transform(y[:, i].reshape(-1, 1)) for i in range(y.shape[1]) ]
        if len(out) == 1:
            return self._wrap(out[0])
        return self._wrap(np.concatenate(out, axis=1))
    def inverse_transform_X(self, X):
        X = to_numpy(X)
        transform = [ X[:, i].reshape(-1, 1) if self._pt_scaler[i] is None else self._pt_scaler[i].inverse_transform(X[:, i].reshape(-1, 1)) for i in range(X.shape[1]) ]
        return self._wrap(np.concatenate(transform, axis=1))
def inverse_transform(self, X, y):
        y = PTDataFrame(self.inverse_transform_y(y), columns=self._ycolumn)
X = PTDataFrame(self.inverse_transform_X(X), columns=self._xcolumns)
return | pd.concat([X, y], axis=1) | pandas.concat |
'''This script converts the corrected subsystem annotations into Ben's standard format.'''
import os
import re
import pandas as pd
import numpy as np
# set the directory
os.chdir('/Users/luho/PycharmProjects/model/model_correction/code')
os.getcwd()
subsystem = pd.read_table('../data/subsystem_correction.tsv')
subsystem['Subsystem_new'] = subsystem['Subsystem_new'].str.replace('(', '@') # string replace
subsystem['num'] = [None]*len(subsystem['Subsystem_new']) # construct the null column
for i in range(len(subsystem['Subsystem_new'])):
subsystem['num'][i] = subsystem['Subsystem_new'][i].count('@') # count the number of specific character in each row
subsystem1 = subsystem[subsystem['num'] >=2]
subsystem['Subsystem_new'] = subsystem['Subsystem_new'].str.replace('@gpi', 'gpi')
subsystem['Subsystem_new'] = subsystem['Subsystem_new'].str.replace('@tca', 'gpi')
subsystem2 = subsystem['Subsystem_new'].str.split('@', expand=True) # split one column into multiple
subsystem2.iloc[:,1] = subsystem2.iloc[:,1].str.replace(')','')
subsystem2['new'] = [None]*len(subsystem['Subsystem_new'])
subsystem2['new'] = subsystem2.iloc[:,1] + ' ' + subsystem2.iloc[:,0].map(str) # combine multiple column into one
'''replace nan with subysystem'''
for i in range(len(subsystem['Subsystem_new'])):
if | pd.isnull(subsystem2['new'][i]) | pandas.isnull |
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from hwer.validation import *
from hwer.utils import average_precision
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.options.display.width = 0
import warnings
import os
from hwer import CategoricalEmbed, FlairGlove100AndBytePairEmbed, NumericEmbed, Node, Edge
from hwer.utils import merge_dicts_nested, build_row_dicts
from ast import literal_eval
import numpy as np
def process_age(age):
age = np.searchsorted([20, 30, 40, 50, 60], age)
return age
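# Illustrative values for the bucketing above (not taken from the dataset):
#   process_age(15) -> 0, process_age(25) -> 1, process_age(65) -> 5
# i.e. np.searchsorted maps ages into the ordinal bins <=20, 21-30, ..., >60 as codes 0..5.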
def process_zip(zip):
try:
zip = int(zip)
zip = int(zip / 10)
except ValueError:
zip = -1
return zip
def process_zip_vectorized(zip):
return np.vectorize(process_zip)(zip)
def get_data_mapper(df_user, df_item, dataset="100K"):
def prepare_data_mappers_100K():
user_nodes = [Node("user", n) for n in df_user.user.values]
n_users = len(user_nodes)
df_user['age_processed'] = process_age(df_user['age'])
df_user['zip_1'] = process_zip_vectorized(df_user['zip'])
df_user['zip_2'] = process_zip_vectorized(df_user['zip_1'])
user_data = dict(zip(user_nodes, build_row_dicts("categorical", df_user[["gender", "age_processed", "occupation", "zip_1", "zip_2"]].values)))
user_numeric = dict(zip(user_nodes, build_row_dicts("numeric", df_user[["user_rating_mean", "user_rating_count"]].values)))
user_data = merge_dicts_nested(user_data, user_numeric)
item_nodes = [Node("item", i) for i in df_item.item.values]
n_items = len(item_nodes)
        df_item["year_processed"] = "_" + df_item.year.apply(str) + "_"
item_text = dict(zip(item_nodes, build_row_dicts("text", df_item.text.values)))
item_cats = dict(zip(item_nodes, build_row_dicts("categorical", df_item[["year_processed", "genres"]].values)))
item_numerics = dict(zip(item_nodes, build_row_dicts("numeric", np.abs(df_item[["title_length", "overview_length", "runtime", "item_rating_mean", "item_rating_count"]].values))))
item_data = merge_dicts_nested(item_text, item_cats, item_numerics)
assert len(user_data) == n_users
assert len(item_data) == n_items
node_data = dict(user_data)
node_data.update(item_data)
embedding_mapper = dict(user=dict(categorical=CategoricalEmbed(n_dims=32), numeric=NumericEmbed(32)),
item=dict(text=FlairGlove100AndBytePairEmbed(), categorical=CategoricalEmbed(n_dims=32), numeric=NumericEmbed(32)))
return embedding_mapper, node_data
if dataset == "100K":
return prepare_data_mappers_100K
elif dataset == "1M":
return prepare_data_mappers_100K
elif dataset == "20M":
pass
else:
raise ValueError("Unsupported Dataset")
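# Illustrative wiring of the factory above (df_user / df_item stand for the
# preprocessed frames produced by the data reader defined below):
#   prepare = get_data_mapper(df_user, df_item, dataset="100K")
#   embedding_mapper, node_data = prepare()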
def get_data_reader(dataset="100K"):
def process_100K_1M(users, movies, train, test):
train = train[["user", "item", "rating", "timestamp"]]
test = test[["user", "item", "rating", "timestamp"]]
user_stats = train.groupby(["user"])["rating"].agg(["mean", "count"]).reset_index()
item_stats = train.groupby(["item"])["rating"].agg(["mean", "count"]).reset_index()
user_stats.rename(columns={"mean": "user_rating_mean", "count": "user_rating_count"}, inplace=True)
item_stats.rename(columns={"mean": "item_rating_mean", "count": "item_rating_count"}, inplace=True)
user_stats["user"] = user_stats["user"].astype(int)
item_stats["item"] = item_stats["item"].astype(int)
train["is_test"] = False
test["is_test"] = True
ratings = pd.concat((train, test))
movies.genres = movies.genres.fillna("[]").apply(literal_eval)
movies['year'] = movies['year'].fillna(-1).astype(int)
movies.keywords = movies.keywords.fillna("[]").apply(literal_eval)
movies.keywords = movies.keywords.apply(lambda x: " ".join(x))
movies.tagline = movies.tagline.fillna("")
text_columns = ["title", "keywords", "overview", "tagline", "original_title"]
movies[text_columns] = movies[text_columns].fillna("")
movies['text'] = movies["title"] + " " + movies["keywords"] + " " + movies["overview"] + " " + movies[
"tagline"] + " " + \
movies["original_title"]
movies["title_length"] = movies["title"].apply(len)
movies["overview_length"] = movies["overview"].apply(len)
movies["runtime"] = movies["runtime"].fillna(0.0)
if "id" in users.columns:
users.rename(columns={"id": "user"}, inplace=True)
else:
users.rename(columns={"user_id": "user"}, inplace=True)
if "id" in movies.columns:
movies.rename(columns={"id": "item"}, inplace=True)
else:
movies.rename(columns={"movie_id": "item"}, inplace=True)
users = users.merge(user_stats, how="left", on="user")
movies = movies.merge(item_stats, how="left", on="item")
movies = movies.fillna(movies.mean())
users = users.fillna(users.mean())
return users, movies, ratings
def read_data_100K(**kwargs):
users = | pd.read_csv("100K/users.csv", sep="\t") | pandas.read_csv |
import pandas as pd
from collections import defaultdict
import pysam
import pathlib
import json
import numpy as np
from scipy.stats import poisson
from statsmodels.stats.multitest import multipletests
from concurrent.futures import ProcessPoolExecutor, as_completed
def _calculate_cell_record(allc_path, output_path, cov_cutoff=2, resolution=100):
"""Count the high coverage bins for each cell, save results to json"""
allc_path = str(allc_path)
output_path = str(output_path)
cov_high_cutoff = int(cov_cutoff * 2)
cell_records = {}
i = 0
with pysam.TabixFile(allc_path) as allc:
for i, line in enumerate(allc.fetch()):
chrom, pos, *_, cov, _ = line.split('\t')
cov = int(cov)
if cov_cutoff < cov <= cov_high_cutoff:
bin_id = int(pos) // resolution
try:
cell_records[chrom][bin_id] += 1
except KeyError:
cell_records[chrom] = defaultdict(int)
cell_records[chrom][bin_id] += 1
cell_total_c = i + 1
cell_records = {
chrom: list(values.keys())
for chrom, values in cell_records.items()
}
# final output to disk
total_records = {'total_c': cell_total_c, 'bins': cell_records}
with open(output_path, 'w') as f:
json.dump(total_records, f)
return output_path
def calculate_blacklist_region(region_records, alpha=0.01):
"""Collect highly covered regions by region-wise poisson FDR p value < alpha"""
# calculate region poisson mu
sum_of_bin = 0
n_bin = 0
for chrom, chrom_values in region_records.items():
sum_of_bin += sum(chrom_values.values())
n_bin += len(chrom_values)
mu = sum_of_bin / n_bin
# calculate region FDR p cutoff
total_p = []
for chrom, chrom_values in region_records.items():
chrom_values = pd.Series(chrom_values)
p_values = poisson.sf(chrom_values.values, mu)
total_p.append(p_values)
total_p = np.concatenate(total_p)
judge, *_ = multipletests(total_p, alpha=alpha, method='fdr_bh')
p_max = total_p[judge].max()
del total_p, judge
# calculate region blacklist
final_blacklist = {}
for chrom, chrom_values in region_records.items():
chrom_values = pd.Series(chrom_values)
p_values = poisson.sf(chrom_values.values, mu)
final_blacklist[chrom] = list(chrom_values[p_values < p_max].index)
return final_blacklist
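# Toy illustration of the blacklist logic above (numbers are made up):
#   region_records = {'chr1': {0: 3, 1: 50, 2: 2}}
#   blacklist = calculate_blacklist_region(region_records, alpha=0.01)
# Bins whose count is improbably high under Poisson(mu) at BH-FDR < alpha are
# flagged per chromosome. Note the final comparison is strictly 'p < p_max', so a
# bin whose p-value equals the largest rejected p-value is not itself included.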
def _calculate_cell_final_values(output_path, region_blacklist):
"""Calculate final cell values while remove blacklist"""
with open(output_path) as f:
cell_record = json.load(f)
total_n = 0
for chrom, bins in cell_record['bins'].items():
total_n += len(set(bins) - region_blacklist[chrom])
return total_n, cell_record['total_c']
def coverage_doublets(allc_dict: dict,
resolution: int = 100,
cov_cutoff=2,
region_alpha=0.01,
tmp_dir='doublets_temp_dir',
cpu=1,
keep_tmp=False):
"""
Quantify cell high coverage bins for doublets evaluation
Parameters
----------
allc_dict
dict with cell_id as key, allc_path as value
resolution
genome bin resolution to quantify, bps
cov_cutoff
cutoff the cov, sites within cov_cutoff < cov <= 2 * cov_cutoff will be count
region_alpha
FDR adjusted P-value cutoff
tmp_dir
temporary dir to save the results
cpu
number of cpu to use
keep_tmp
Whether save the tem_dir for debugging
Returns
-------
"""
tmp_dir = pathlib.Path(tmp_dir)
tmp_dir.mkdir(exist_ok=True)
# count each cell and collect region-wise sum in the same time
region_records = {}
def _sum_region(p):
with open(p) as cr:
cell_record = json.load(cr)
for chrom, chrom_bins in cell_record['bins'].items():
if chrom not in region_records:
region_records[chrom] = defaultdict(int)
for bin_id in chrom_bins:
region_records[chrom][bin_id] += 1
return
cell_paths = {}
with ProcessPoolExecutor(cpu) as exe:
futures = {}
for cell_id, path in allc_dict.items():
output_path = f"{tmp_dir}/{cell_id}.json"
if pathlib.Path(output_path).exists():
# directly quantify region records
_sum_region(output_path)
cell_paths[cell_id] = output_path
continue
future = exe.submit(_calculate_cell_record,
allc_path=path,
output_path=output_path,
resolution=resolution,
cov_cutoff=cov_cutoff)
futures[future] = cell_id
# during calculating the cell records, also summarize region records
for future in as_completed(futures):
cell_id = futures[future]
output_path = future.result()
cell_paths[cell_id] = output_path
_sum_region(output_path)
# calculate dataset specific region blacklist
region_blacklist = calculate_blacklist_region(region_records, alpha=region_alpha)
with open(f'{tmp_dir}/region_blacklist.json', 'w') as f:
json.dump(region_blacklist, f)
# list to set, dump don't support set
region_blacklist = {k: set(v) for k, v in region_blacklist.items()}
# calculate cell final stats
total_values = []
cells = []
for cell_id, output_path in cell_paths.items():
cell_values = _calculate_cell_final_values(output_path, region_blacklist)
total_values.append(cell_values)
cells.append(cell_id)
total_values = | pd.DataFrame(total_values, index=cells, columns=['TotalHCB', 'TotalC']) | pandas.DataFrame |
from brightics.common.report import ReportBuilder, strip_margin, plt2MD, dict2MD, \
pandasDF2MD, keyValues2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
import pandas as pd
import numpy as np
def add_row_number(table, group_by=None, **params):
check_required_parameters(_add_row_number, params, ['table'])
if group_by is not None:
return _function_by_group(_add_row_number, table, group_by=group_by, **params)
else:
return _add_row_number(table, **params)
def _add_row_number(table, new_col='add_row_number'):
df = pd.DataFrame()
n = len(table)
for i in range(1, n + 1):
df2 = pd.DataFrame([{new_col:i}])
df = df.append(df2, ignore_index=True)
out_table = pd.concat([df, table], axis=1)
return {'out_table': out_table}
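# The loop above appends one row at a time; an equivalent vectorized sketch
# (not part of the original Brightics implementation) would be:
#   df = pd.DataFrame({new_col: np.arange(1, len(table) + 1)})
#   out_table = pd.concat([df, table], axis=1)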
def discretize_quantile(table, group_by=None, **params):
check_required_parameters(_discretize_quantile, params, ['table'])
if group_by is not None:
return _function_by_group(_discretize_quantile, table, group_by=group_by, **params)
else:
return _discretize_quantile(table, **params)
def _discretize_quantile(table, input_col, num_of_buckets=2, out_col_name='bucket_number'):
out_table = table.copy()
out_table[out_col_name], buckets = pd.qcut(table[input_col], num_of_buckets, labels=False, retbins=True, precision=10, duplicates='drop')
# Build model
rb = ReportBuilder()
rb.addMD(strip_margin("""
## Quantile-based Discretization Result
"""))
# index_list, bucket_list
index_list = []
bucket_list = []
for i, bucket in enumerate(buckets):
if i == 1:
index_list.append(i - 1)
bucket_list.append("[{left}, {bucket}]".format(left=left, bucket=bucket))
elif i > 1:
index_list.append(i - 1)
bucket_list.append("({left}, {bucket}]".format(left=left, bucket=bucket))
left = bucket
# cnt_array
cnt = np.zeros(len(index_list), int)
for i in range(len(table)):
cnt[out_table[out_col_name][i]] += 1
# Build model
result = dict()
result_table = pd.DataFrame.from_items([
['bucket number', index_list],
['buckets', bucket_list],
['count', cnt]
])
result['result_table'] = result_table
rb.addMD(strip_margin("""
### Data = {input_col}
|
| {result_table}
""".format(input_col=input_col, n=num_of_buckets, result_table=pandasDF2MD(result_table))))
result['report'] = rb.get()
return {'out_table': out_table, 'model': result}
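# Hypothetical call to the quantile discretizer above (column name and values are
# illustrative, not from a real Brightics workflow):
#   df = pd.DataFrame({'age': [21, 35, 47, 52, 68, 74]})
#   res = _discretize_quantile(df, input_col='age', num_of_buckets=3)
#   res['out_table']['bucket_number']   # quantile bin index (0..2) per row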
def binarizer(table, column, threshold=0, threshold_type='greater', out_col_name=None):
if out_col_name is None:
out_col_name = 'binarized_' + str(column)
table[out_col_name] = 0
for t in range(0, len(table[column])):
if threshold_type == 'greater':
if table[column][t] > threshold:
table[out_col_name][t] = 1
else:
if table[column][t] >= threshold:
table[out_col_name][t] = 1
return{'table':table}
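# A vectorized sketch equivalent to the loop above (illustrative alternative,
# not the original implementation):
#   cmp = table[column] > threshold if threshold_type == 'greater' else table[column] >= threshold
#   table[out_col_name] = cmp.astype(int)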
def capitalize_variable(table, input_cols, replace, out_col_suffix=None):
if out_col_suffix is None:
out_col_suffix = '_' + replace
out_table = table
for input_col in input_cols:
out_col_name = input_col + out_col_suffix
out_col = | pd.DataFrame(columns=[out_col_name]) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
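# Worked check of the expected values above: on 2015-01-04 the CL1 weight is split
# 0.5/0.5 between CLF5 and CLG5, so the weighted return is 0.5*0.05 + 0.5*0.1 = 0.075.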
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')],
names=("date", "contract"))
cols = pd.Index(["CO1"], name="generic")
weights2 = pd.DataFrame(1, index=widx, columns=cols)
wts_exp = {"CL": weights1, "CO": weights2}
assert_dict_of_frames(wts, wts_exp)
def test_reindex():
# related to https://github.com/matthewgilbert/mapping/issues/11
# no op
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
prices = pd.Series([103, 101, 102, 100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
exp_prices = prices
assert_series_equal(exp_prices, new_prices)
# missing front prices error
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
prices = pd.Series([100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
with pytest.raises(ValueError):
util.reindex(prices, widx, 0)
# NaN returns introduced and filled
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')])
prices = pd.Series([100, 101, 102, 103, 104], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLF5'),
(TS('2015-01-05'), 'CLH5'),
(TS('2015-01-06'), 'CLF5'),
(TS('2015-01-06'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=1)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-01'), 'CLH5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLF5'),
(TS('2015-01-05'), 'CLH5'),
(TS('2015-01-06'), 'CLF5'),
(TS('2015-01-06'), 'CLH5')
])
exp_prices = pd.Series([100, np.NaN, 101, 102, 103, 104, 103,
104, np.NaN, np.NaN], index=idx)
assert_series_equal(exp_prices, new_prices)
# standard subset
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-01'), 'CHF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')])
prices = pd.Series([100, 101, 102, 103, 104, 105, 106, 107], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
exp_prices = pd.Series([100, 102, 103, 104, 105], index=idx)
assert_series_equal(exp_prices, new_prices)
# check unique index to avoid duplicates from pd.Series.reindex
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
prices = pd.Series([100.10, 101.13], index=idx)
widx = pd.MultiIndex.from_tuples([( | TS('2015-01-03') | pandas.Timestamp |
""" slalom.dataops.pandasutils module """
import os
from logless import (
get_logger,
logged,
logged_block,
)
import uio
USE_SCRATCH_DIR = False
logging = get_logger("slalom.dataops.sparkutils")
try:
import pandas as pd
except Exception as ex:
pd = None
logging.warning(f"Could not load pandas library. Try 'pip install pandas'. {ex}")
def _raise_if_missing_pandas(as_warning=False, ex=None):
if not pd:
msg = f"Could not load pandas library. Try 'pip install pandas'. {ex or ''}"
if as_warning:
logging.warning(msg)
else:
raise RuntimeError(msg)
def read_csv_dir(csv_dir, usecols=None, dtype=None):
_raise_if_missing_pandas()
df_list = []
for s3_path in uio.list_s3_files(csv_dir):
if "_SUCCESS" not in s3_path:
if USE_SCRATCH_DIR:
scratch_dir = uio.get_scratch_dir()
filename = os.path.basename(s3_path)
csv_path = os.path.join(scratch_dir, filename)
if os.path.exists(csv_path):
logging.info(
f"Skipping download of '{s3_path}'. File exists as: '{csv_path}' "
"(If you do not want to use this file, please delete "
"the file or unset the USE_SCRATCH_DIR environment variable.)"
)
else:
logging.info(
f"Downloading S3 file '{s3_path}' to scratch dir: '{csv_path}'"
)
uio.download_s3_file(s3_path, csv_path)
else:
logging.info(f"Reading from S3 file: {s3_path}")
csv_path = s3_path
df = pd.read_csv(
csv_path, index_col=None, header=0, usecols=usecols, dtype=dtype
)
df_list.append(df)
logging.info(f"Concatenating datasets from: {csv_dir}")
ret_val = pd.concat(df_list, axis=0, ignore_index=True)
logging.info("Dataset concatenation was successful.")
return ret_val
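# Illustrative call (the S3 prefix and column names are placeholders):
#   df = read_csv_dir("s3://my-bucket/exports/orders/", usecols=["id", "total"])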
def get_pandas_df(source_path, usecols=None):
if not pd:
raise RuntimeError(
"Could not execute get_pandas_df(): Pandas library not loaded."
)
if ".xlsx" in source_path.lower():
df = read_excel_sheet(source_path, usecols=usecols)
else:
try:
df = | pd.read_csv(source_path, low_memory=False, usecols=usecols) | pandas.read_csv |
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.ensemble import (
RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor
)
from sklearn.linear_model import TheilSenRegressor, Lars, Lasso
from mlcomp import Competition, create_app, eval_metrics
def load_boston_data():
# Load the data
boston = load_boston()
df = | pd.DataFrame(boston.data, columns=boston.feature_names) | pandas.DataFrame |
#!/usr/bin/env python
# By <NAME>
# June 7, 2020
# ZTF crossmatch with X-Ray Binaries (ROSAT catalog)
import io
import time
import datetime
import argparse
import logging
import gzip
from threading import Lock
from copy import deepcopy
import numpy as np
import pandas as pd
import fastavro
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.io import fits
from aiokafka import AIOKafkaConsumer
import asyncio
from concurrent.futures import ThreadPoolExecutor
import functools
from astroquery.simbad import Simbad
from .constants import LOGGING, BASE_DIR, DB_DIR, SIMBAD_EXCLUDES # FITS_DIR
from .db_caching import create_connection, insert_data, update_value, insert_lc_dataframe, get_cached_ids
# Example command line execution:
# for date 210119, program_id 1, 64 cores, suffix 64t
# $ python ztf_rosat_crossmatch.py 210119 1 64 64t
SIGMA_TO_95pctCL = 1.95996
FITS_DIR = "../local/cutouts_debug"
def exception_handler(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.exception(e)
return wrapper
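# The decorator above logs any exception and returns None instead of raising, so
# decorated helpers must be treated as optional results. Illustrative use
# (the function name is hypothetical):
#   @exception_handler
#   def parse_candidate(buf):
#       return read_avro_bytes(buf)["candidate"]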
@exception_handler
def read_avro_file(fname):
"""Reads a single packet from an avro file stored with schema on disk."""
with open(fname, "rb") as f:
freader = fastavro.reader(f)
for packet in freader:
return packet
@exception_handler
def read_avro_bytes(buf):
"""Reads a single packet from an avro file stored with schema on disk."""
with io.BytesIO(buf) as f:
freader = fastavro.reader(f)
for packet in freader:
return packet
@exception_handler
def get_candidate_info(packet):
return {"ra": packet["candidate"]["ra"], "dec": packet["candidate"]["dec"],
"object_id": packet["objectId"], "candid": packet["candid"]}
@exception_handler
def save_cutout_fits(packet, output):
"""Save fits cutouts from packed into output."""
objectId = packet["objectId"]
pid = packet["candidate"]["pid"]
for im_type in ["Science", "Template", "Difference"]:
with gzip.open(io.BytesIO(packet[f"cutout{im_type}"]["stampData"]), "rb") as f:
with fits.open(io.BytesIO(f.read())) as hdul:
hdul.writeto(f"{output}/{objectId}_{pid}_{im_type}.fits", overwrite=True)
@exception_handler
def make_dataframe(packet, repeat_obs=True):
"""Extract relevant lightcurve data from packet into pandas DataFrame."""
df = pd.DataFrame(packet["candidate"], index=[0])
if repeat_obs:
df["ZTF_object_id"] = packet["objectId"]
return df[["ZTF_object_id", "jd", "fid", "magpsf", "sigmapsf", "diffmaglim"]]
df_prv = pd.DataFrame(packet["prv_candidates"])
df_merged = | pd.concat([df, df_prv], ignore_index=True) | pandas.concat |
# coding: utf-8
# # Content
# __1. Exploratory Visualization__
# __2. Data Cleaning__
# __3. Feature Engineering__
# __4. Modeling & Evaluation__
# __5. Ensemble Methods__
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('ggplot')
# In[2]:
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline, make_pipeline
from scipy.stats import skew
from sklearn.decomposition import PCA, KernelPCA
from sklearn.preprocessing import Imputer
# In[3]:
from sklearn.model_selection import cross_val_score, GridSearchCV, KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
# In[4]:
pd.set_option('max_colwidth',200)
pd.set_option('display.width',200)
pd.set_option('display.max_columns',500)
pd.set_option('display.max_rows',1000)
# In[7]:
train=pd.read_csv('E:/Workspace/HousePrices/train.csv')
test= | pd.read_csv('E:/Workspace/HousePrices/test.csv') | pandas.read_csv |
import cobra
from cobra.core.metabolite import elements_and_molecular_weights
elements_and_molecular_weights['R']=0.0
elements_and_molecular_weights['Z']=0.0
import pandas as pd
import numpy as np
import csv
#### Change Biomass composition
# define a function to change a biomass reaction in the model
def update_biomass(model, rxn, stoich, metabolite):
r = model.reactions.get_by_id(rxn)
new_stoich = stoich
# you now have a dictionary of new stoichs for your model
for m,s in r.metabolites.items():
stoich = s*-1
temp_dict = {m:stoich}
r.add_metabolites(temp_dict)
r.add_metabolites(new_stoich)
# Then get the total to equal 1 mg biomass DW
total = 0
for m,s in r.metabolites.items():
gfw = model.metabolites.get_by_id(m.id).formula_weight
mass = gfw*s*-1
total = total+mass
correction = total/1000 # this will get it to 1000 ug total mass
# Then adjust the stoichiometry as appropriate
for m,s in r.metabolites.items(): # now change the stoich
to_add = s/correction-s
r.add_metabolites({m:to_add})
# Finally build the biomass_c metabolite
imbal = r.check_mass_balance()
if 'charge' in imbal.keys():
met_charge = imbal['charge']*-1
del imbal['charge']
met_mass = 0
formula_string = ''
for e,v in imbal.items():
if v > 1e-10 or v < -1e-10:
mass = elements_and_molecular_weights[e]
met_mass = met_mass+(mass*-1*v)
form_str = e+str(-1*v)
formula_string = formula_string + form_str
met = model.metabolites.get_by_id(metabolite)
met.formula = formula_string
met.charge = met_charge
r.add_metabolites({met:1})
# Add GAM constraint
if rxn == 'bof_c':
gam_met = model.metabolites.GAM_const_c
r.add_metabolites({gam_met:1})
model.repair()
print(model.reactions.get_by_id(rxn).reaction)
print('')
print(model.metabolites.get_by_id(met.id).formula)
print('')
print(model.metabolites.get_by_id(met.id).formula_weight)
print('')
print(model.reactions.get_by_id(rxn).check_mass_balance())
return model
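# Example usage (sketch): 'bof_c' and 'biomass_c' are the reaction/metabolite ids used
# later in this script; the metabolite ids and coefficients below are hypothetical
# placeholders for a measured composition. The stoichiometry is a {Metabolite: coefficient}
# dict, with negative values for consumed species.
def example_update_biomass(model):
    new_stoich = {model.metabolites.ala__L_c: -0.35,   # hypothetical precursor demand
                  model.metabolites.atp_c: -30.0,      # hypothetical ATP cost
                  model.metabolites.h2o_c: -30.0,
                  model.metabolites.adp_c: 30.0,
                  model.metabolites.pi_c: 30.0}
    return update_biomass(model, 'bof_c', new_stoich, 'biomass_c')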
#############################################
#### Simulate growth with all constraints####
#############################################
def figure_2LL(model):
## Run all 27 parameter combos and capture:
#### - growth rate
#### - biomass
import math as m
cols = ['ID','GR','mgDW','Cells']
data_out = pd.DataFrame(columns=cols)
Po_vals = ['mean','ub','lb']
a_star_vals = ['mean','ub','lb']
cell_mass = ['mean','ub','lb']
a_star_all = pd.read_csv('pt_a_star.csv',index_col=0,header=0)
rel = pd.read_csv('fluor_lamp.csv',header=0)
xsec = 80. #culture cross sectional area in cm2
path_len = 4.7 #cm
volume = 375.
time_interval = 20
for p in Po_vals:
for a in a_star_vals:
for c in cell_mass:
sample_id = p+'_'+a+'_'+c
model = model
if a == 'mean':
a_star = a_star_all['LL']
elif a == 'ub':
a_star = a_star_all['LL_UB']
else:
a_star = a_star_all['LL_LB']
if c == 'mean':
mgCell = 19.1/1e9
elif c == 'ub':
mgCell = 21.1/1e9
else:
mgCell = 17.1/1e9
innoc = 2.8e6*volume # cells/mL * total mL
iDW = mgCell*innoc # initial culture biomass
gDW = iDW
cells = (gDW/mgCell)
photon_usage = pd.DataFrame(data=[0,0],index = ['Delivered','Absorbed'],columns=['0'])
biomass = pd.DataFrame(data=[gDW,cells],index = ['Biomass','Cells'],columns=['0']) # add it up at the end of each simulation
o2_check = pd.DataFrame(data=[0],index = ['Max oxygen evolution'],columns=['0'])
for t in range(time_interval,1440+time_interval,time_interval): #One simulation every 20 minutes from t=0 to t=24 hrs (1440 min)
import math as m
interval_bm = 0 #initializes the total biomass
photon_count = 0
atten = np.zeros(len(a_star)) #captures light attenuation
tmp_o2_evo = 0
gDW = biomass[str(t-time_interval)]['Biomass'] #mg DW
cells = (gDW/mgCell)
if p == 'mean':
Ps = 2.94e-11 # umol O2 cell-1 s-1
alpha = 9.82e-2
beta = 0.0
resp = 1.83e-12
elif p == 'ub':
Ps = 3.06e-11 # umol O2 cell-1 s-1
alpha = 9.75e-2
beta = 0.0
resp = 1.75e-12
else:
Ps = 2.83e-11 # umol O2 cell-1 s-1
alpha = 9.91e-2
beta = 0.0
resp = 1.92e-12
irrad = 60.
# Photon_flux is the initial amount of light delivered to the culture at each wavelength from 400-700 nm
photon_flux = (rel['rel_height'].values)*irrad*xsec/10000*time_interval*60. #umol/(m2*s) * cm2 * 1m2/10000cm2 * 60s/min * min = umol photons/time interval
total_photons = sum(photon_flux)
photon_usage[str(t)]=[total_photons,0]
total_absorbed = 0.
ti_o2 = 0.
for nm in range(len(photon_flux)):
abs_coeff = a_star[400+nm] # a* value for the given nm (cm2/cell)
Io = photon_flux[nm] # incident photon flux at this nm at this slice (umol_photon/time_interval)
Ia = Io-Io*(m.exp(-1*abs_coeff*cells/volume*path_len))
nm_abs = Ia
total_absorbed = total_absorbed+nm_abs
conv_abs = total_absorbed/time_interval/60./(cells) # converts abs to O2 evo curve units umol/TI * TI/min * min/s * 1/cells => umol/(mgchla*s)
slice_o2 = (Ps*(1-m.exp(-1*alpha*conv_abs/Ps))*(m.exp(-1*beta*conv_abs/Ps)))-resp #umol O2 cell-1 s-1
ti_o2 = ti_o2+(slice_o2*(cells)*60.*time_interval) #umol O2 cell-1 s-1 * cells * s/min * min/TI = umol O2/TI
o2_check[str(t)]=ti_o2
o2evo = ti_o2
model.reactions.EX_o2_e.upper_bound = o2evo # o2evo
model.reactions.EX_o2_e.lower_bound = 0.9*o2evo#0. # <----Po Constraint
photon_usage[str(t)]['Absorbed']=total_absorbed
ngam = resp*60.*time_interval*cells
model.reactions.NGAM.lower_bound = ngam
cef_rate = 5.*gDW/60.*time_interval #CEF sets CEF_h upper bound. umol /(mgDW*h) * mgDW * h/60min * min/TI = umol/TI
model.reactions.CEF_h.upper_bound = cef_rate
model.reactions.EX_photon_e.lower_bound = total_absorbed*-1.
model.reactions.EX_photon_e.upper_bound = total_absorbed*-0.9999999
###### Parameters for PSII fluorescence
## Fv/Fm
FvFm_LL = 0.69
## Calculate Y(II) based on absorbed
abs_conv = total_absorbed/time_interval/60./(cells)
yII_LL = 0.7016*np.exp(-8.535e8*abs_conv)
# yII_LL2 = (2.77e16*(m.pow(abs_conv,2))-(2.51e8*abs_conv)+6.97e-1)
## Y(NPQ)
yNPQ_LL = 1./(1.+(np.power(2.3e-9/abs_conv,3.)))
if yNPQ_LL < 0:
yNPQ_LL = 0.
### Constraints
phoYII = round(yII_LL,2) # Y(II)
regNPQ = round(yNPQ_LL,2) # Y(NPQ)
regFvFm = round((1-FvFm_LL),2) # Photons lost upstream of PSII (1-Fv/Fm)
unrNPQ = round((1-phoYII-regNPQ-regFvFm),2) # Y(NO)
### Edit model constraints with the appropriate values
## PHO_PSIIt_u
rxn = model.reactions.PHO_PSIIt_u
## reset the stoich
for met,s in rxn.metabolites.items():
stoich = s*-1
temp_dict = {met:stoich}
rxn.add_metabolites(temp_dict)
m1 = model.metabolites.photon_YII_u
m2 = model.metabolites.photon_YNPQ_u
m4 = model.metabolites.photon_YNO_u
m3 = model.metabolites.photon_h
m5 = model.metabolites.get_by_id('photon_1-FvFm_u')
rxn.add_metabolites({m1:phoYII,
m2:regNPQ,
m4:unrNPQ,
m5:regFvFm,
m3:-1.})
model.reactions.DM_photon_c.upper_bound = 0. # constrained <----hv Constraint
# Add D1 damage cost uncoupled to PSII
## If uncoupled, set the lower and upper bounds to the experimentally determined values
# Damage rates
D1_rate = 7e-6# # LL: umol D1/mgDW h-1 <---- D1 Constraint
D1_rate = D1_rate * gDW/60.# ugD1 ugDW-1 min-1 * mgDW * 1h/60 min = umolD1 min-1
D1_rate = D1_rate * time_interval # umolD1 min-1 * min/TI = umol D1/TI
model.reactions.NGAM_D1_u.lower_bound = D1_rate #
model.reactions.NGAM_D1_u.upper_bound = 1.0001*(D1_rate) #
## Solve the model
model.objective = 'bof_c'
solution = model.optimize()
if solution.status == 'optimal':
obj_rxn = model.reactions.bof_c
biomass[str(t)]=(gDW+obj_rxn.x,(gDW+obj_rxn.x)/(mgCell))
### collect data
if t == 1440:
dry_weight = biomass['1420']['Biomass']
cell_count = biomass['1420']['Cells']
mu = np.log(biomass['1440']['Cells']/biomass['0']['Cells'])/(1440/60)
data_out = data_out.append({'ID':sample_id,'GR':mu,'mgDW':dry_weight,'Cells':cell_count},ignore_index=True)
return data_out
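# Minimal standalone sketch of the two relationships used inside the loop above: a
# Beer-Lambert style absorption term followed by the P-I (photosynthesis-irradiance)
# curve that converts absorbed photons into net O2 evolution. Default Ps/alpha/beta/resp
# are the 'mean' LL parameters from figure_2LL; Io, abs_coeff and cells are illustrative only.
def example_light_to_o2(Io=1.0e5, abs_coeff=1.0e-9, cells=1.0e9, volume=375.,
                        path_len=4.7, time_interval=20,
                        Ps=2.94e-11, alpha=9.82e-2, beta=0.0, resp=1.83e-12):
    import math as m
    # photons absorbed over one time interval at a single wavelength
    Ia = Io - Io * m.exp(-1 * abs_coeff * cells / volume * path_len)
    # convert to a per-cell, per-second rate before applying the P-I curve
    conv_abs = Ia / time_interval / 60. / cells
    # net O2 evolution per cell per second (gross photosynthesis minus respiration)
    o2_per_cell = (Ps * (1 - m.exp(-1 * alpha * conv_abs / Ps))
                   * m.exp(-1 * beta * conv_abs / Ps)) - resp
    return o2_per_cell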
def figure_2HL(model):
## Run all 27 parameter combos and capture:
#### - growth rate
#### - biomass
import math as m
cols = ['ID','GR','mgDW','Cells']
data_out = pd.DataFrame(columns=cols)
Po_vals = ['mean','ub','lb']
a_star_vals = ['mean','ub','lb']
cell_mass = ['mean','ub','lb']
a_star_all = pd.read_csv('pt_a_star.csv',index_col=0,header=0)
rel = pd.read_csv('fluor_lamp.csv',header=0)
xsec = 80. #culture cross sectional area in cm2
path_len = 4.7 #cm
volume = 375.
time_interval = 20
for p in Po_vals:
for a in a_star_vals:
for c in cell_mass:
sample_id = p+'_'+a+'_'+c
model = model
if a == 'mean':
a_star = a_star_all['HL']
elif a == 'ub':
a_star = a_star_all['HL_UB']
else:
a_star = a_star_all['HL_LB']
if c == 'mean':
mgCell = 20.4/1e9
elif c == 'ub':
mgCell = 21.8/1e9
else:
mgCell = 19.0/1e9
innoc = 3.5e6*volume # cells/mL * total mL
iDW = mgCell*innoc # initial culture biomass
gDW = iDW
cells = (gDW/mgCell)
photon_usage = pd.DataFrame(data=[0,0],index = ['Delivered','Absorbed'],columns=['0'])
biomass = pd.DataFrame(data=[gDW,cells],index = ['Biomass','Cells'],columns=['0']) # add it up at the end of each simulation
o2_check = | pd.DataFrame(data=[0],index = ['Max oxygen evolution'],columns=['0']) | pandas.DataFrame |
import sys
import os
import logging
import datetime
import pandas as pd
from job import Job, Trace
from policies import ShortestJobFirst, FirstInFirstOut, ShortestRemainingTimeFirst, QuasiShortestServiceFirst
sys.path.append('..')
def simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):
if policy == 'sjf':
scheduler = ShortestJobFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'fifo':
scheduler = FirstInFirstOut(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'srtf':
scheduler = ShortestRemainingTimeFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'qssf':
scheduler = QuasiShortestServiceFirst(
trace, vc, placement, log_dir, logger, start_ts, args[0])
scheduler.simulate()
logger.info(f'Finish {vc.vc_name}')
return True
def get_available_schedulers():
return ['fifo', 'sjf', 'srtf', 'qssf']
def get_available_placers():
return ['random', 'consolidate', 'consolidateFirst']
def trace_process(dir, date_range):
start = '2020-04-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['job_id', 'user', 'vc', 'jobname', 'gpu_num',
'cpu_num', 'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_philly_process(dir, date_range):
start = '2017-10-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['user', 'vc', 'jobname', 'gpu_num',
'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
df['state'] = df['state'].replace('Pass', 'COMPLETED')
df['state'] = df['state'].replace('Failed', 'FAILED')
df['state'] = df['state'].replace('Killed', 'CANCELLED')
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_parser(df):
trace = Trace()
for _, series in df.iterrows():
trace.append_job(Job(series))
trace.sort_jobs('submit_time')
return trace
def logger_init(file):
logger = logging.getLogger()
handler_file = logging.FileHandler(f'{file}.log', 'w')
handler_stream = logging.StreamHandler() # sys.stdout
logger.setLevel(logging.INFO)
handler_file.setLevel(logging.INFO)
handler_stream.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s | %(processName)s | %(message)s', datefmt='%Y %b %d %H:%M:%S')
handler_file.setFormatter(formatter)
handler_stream.setFormatter(formatter)
logger.addHandler(handler_file)
logger.addHandler(handler_stream)
return logger
def cluster_concatenate(policy, placer, log_dir, dir):
prefix = f'{policy}_{placer}'
if not os.path.exists(log_dir+'/all'):
os.mkdir(log_dir+'/all')
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vcs = list(vc_dict.keys())
'''Log'''
cluster_log = pd.DataFrame()
for vc in vcs:
vc_log = pd.read_csv(f'{log_dir}/{vc}/{prefix}_{vc}_log.csv')
cluster_log = pd.concat([cluster_log, vc_log])
cluster_log.sort_values(by='submit_time', inplace=True)
cluster_log.to_csv(f'{log_dir}/all/{prefix}_all_log.csv', index=False)
'''Seq'''
cluster_seq = pd.DataFrame()
add_list = ['total_gpu_num', 'idle_gpu_num', 'pending_gpu_num', 'running_gpujob_num', 'pending_gpujob_num',
'pending_job_num_less_8', 'total_node_num', 'consolidate_node_num', 'shared_node_num']
for vc in vcs:
vc_seq = pd.read_csv(f'{log_dir}/{vc}/{prefix}_{vc}_seq.csv')
if len(cluster_seq) == 0:
cluster_seq = vc_seq
continue
cluster_seq[add_list] = cluster_seq[add_list] + vc_seq[add_list]
cluster_seq.dropna(inplace=True)
cluster_seq = cluster_seq.astype(int)
cluster_seq['gpu_utilization'] = ((cluster_seq['total_gpu_num'] - cluster_seq['idle_gpu_num']) /
cluster_seq['total_gpu_num']).round(3)
cluster_seq.to_csv(f'{log_dir}/all/{prefix}_all_seq.csv', index=False)
def cluster_analysis(placer, log_dir, dir):
'''Generate Algorithm Comparison CSV'''
# ignore_warm_up = start_ts + 7*24*3600
prefix_list = []
for i in get_available_schedulers():
prefix = f'{i}_{placer}'
prefix_list.append(prefix)
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vcs = list(vc_dict.keys())
vcs.append('all')
jct_avg = pd.DataFrame()
que_avg = | pd.DataFrame() | pandas.DataFrame |
from multiprocessing import Pool #witness the power
import wikipedia
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from bs4 import BeautifulSoup
import seaborn as sns
import pickle
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from fuzzywuzzy import fuzz
from collections import defaultdict
base_url = "https://www.rottentomatoes.com"
starting_url = "https://www.rottentomatoes.com/top/"
def percentage_missing(df):
"""
Calculates missing data for each column in a dataframe.
This function is informative.
Inputs:
- df: Pandas dataframe
Returns:
- None
"""
for c in df.columns:
missing_perc = (sum( | pd.isnull(df[c]) | pandas.isnull |
import pandas as pd
import numpy as np
import json
import os
from BALSAMIC.utils.constants import HSMETRICS_QC_CHECK
from BALSAMIC.utils.rule import get_sample_type
def read_hs_metrics(hs_metrics_file: str):
"""Reads the HS metrics (json-format) and returns it as a DataFrame
Args:
hs_metrics_file: A string path to the file
Returns:
metrics_df: DataFrame
"""
with open(hs_metrics_file) as metrics_file:
metrics_df = pd.read_json(metrics_file)
return metrics_df
def read_qc_table(qc_table: dict):
"""Reads the QC-table (json-format) and returns it as a DataFrame
Args:
qc_table: Dictionary imported from constants
Returns:
qc_df: DataFrame
"""
qc_df = | pd.DataFrame.from_dict(qc_table) | pandas.DataFrame.from_dict |
"""Metrics to evaluate ICD-9 coding models.
"""
import pandas as pd
def basic_micro_metrics(true_labels, predictions, delimiter=';'):
'''Computes basic micro metrics (those not requiring scores).
Micro metrics are averaged across all predictions and labels.
Arguments
---------
true_labels : iterable
Contains sets of true labels, as delimited strings or containers that can
be cast to sets.
predictions : iterable
Contains sets of predictions, as delimited strings or containers that can
be cast to sets.
delimiter : char, optional
A delimiter to use if the labels are formatted as strings.
Returns
-------
metrics : dict
A dictionary containing micro-averaged precision, recall, and f1 score.
'''
true_pos = 0
false_pos = 0
false_neg = 0
true_labels = | pd.Series(true_labels) | pandas.Series |
import pandas as pd
from scipy.stats import ttest_rel
def t_test_report(perf_df_a, tag_a, perf_df_b, tag_b, metric_cols):
for col in metric_cols:
report = dict(A=tag_a, B=tag_b, metric=col,
mean_A=perf_df_a[col].mean(),
std_A=perf_df_a[col].std(),
mean_B=perf_df_b[col].mean(),
std_B=perf_df_b[col].std())
t, p = ttest_rel(perf_df_a[col], perf_df_b[col])
report["t-statistic"] = t
report["p-value"] = p
yield report
if __name__ == "__main__":
metric_cols = ["test_AUPRC", "test_AUROC", "test_AVGRANK"]
gwava_perf = pd.read_csv("./experiment_result/gwava_performance_wo_chr5_30_CERENKOV2_1337.tsv", sep="\t", usecols=metric_cols)
c1_perf = | pd.read_csv("./experiment_result/c1_cross_validate_xv_report.tsv", sep="\t", usecols=metric_cols) | pandas.read_csv |
import gzip
import pickle
from os.path import join, expanduser
import pandas as pd
import json
from article_analysis.parse import get_chunk, get_article_ngram_dict, find_doi_chunk_map, load_ngram_dist
import article_analysis.parse_ent as aape
head = -1
verbose = True
keywords = ['hypothesis', 'hypotheses', 'table']
batch_size = 50
fp_gen = expanduser('~/data/jstor/latest/')
fp_gen = '/home/valery/RE_Project/amj_ngrams/latest_listagg'
input_path = fp_gen
output_path = fp_gen
prefix = 'ngrams_dict'
nlp = aape.init_nlp()
with open(join(output_path, 'registry_json.txt')) as file:
registry_dict = json.loads(file.read())
fname = '{0}corpus_clean_dict.pgz'.format(fp_gen)
with gzip.open(fname) as fp:
articles_ds = pickle.load(fp)
all_dois_flat = [v for sublist in registry_dict.values() for v in sublist]
if head > 0:
all_dois_flat = all_dois_flat[:head]
# check boundaries
dois_batched = [all_dois_flat[i:i + batch_size] for i in range(0, len(all_dois_flat), batch_size)]
df_agg = []
for dois_batch, j in zip(dois_batched, range(len(dois_batched))):
if verbose:
print('batch number {0}'.format(j))
dch_dict = get_articles(dois_batch, registry_dict, input_path, prefix)
for doi in dois_batch:
ngram_order = 1
article_ngrams = dch_dict[doi]
ngram_positions_list = article_ngrams[ngram_order]
ixs = []
for keyword in keywords:
if keyword in ngram_positions_list.keys():
ixs += ngram_positions_list[keyword]
print('doi {0}, len ixs {1}'.format(doi, len(ixs)))
carticle = articles_ds[doi]
chunks = aape.get_np_candidates(ixs, carticle, nlp, 1)
total_counts, total_counts_raw, table, tree_dict = aape.choose_popular_np_phrases(chunks)
df = pd.DataFrame(table, columns=['root', 'np', 'count'])
df['doi'] = doi
df_agg.append(df)
df0 = | pd.concat(df_agg) | pandas.concat |
#this is a python script to import as extension in ipynb
#the functions perform transformation on the data used for timeseries analysing
import sklearn
import pandas as pd
import numpy as np
import holoviews as hv
import re
import matplotlib.pyplot as plt
from holoviews import opts
from bokeh.models import LinearAxis, Range1d, GlyphRenderer
from holoviews.plotting.links import RangeToolLink
from IPython.display import display, HTML
from sklearn.preprocessing import StandardScaler
from datetime import datetime, timedelta
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
def dat_facebook_states(df, state):
df = df[df["polygon_name"]==state]
df.index = pd.to_datetime(df["ds"]) - timedelta(days=0)
df = df[df["ds"]< pd.to_datetime('2020-06-27', format='%Y%m%d', errors='ignore')]
return(df[["all_day_bing_tiles_visited_relative_change","all_day_ratio_single_tile_users"]])
def dat_rki_states(rki, state):
rki = rki[rki["Bundesland"]==state][["AnzahlFall", "Refdatum"]].groupby(["Refdatum"]).sum()
rki.index = pd.to_datetime(rki.index)
return(rki)
def dat_apple_states(df, state):
df = df[(df["region"]==state) & (df["transportation_type"]=="driving")].transpose().iloc[6:]
df.index = pd.to_datetime(df.index)
df = df[df.index< pd.to_datetime('2020-06-27', format='%Y%m%d', errors='ignore')]
df.columns = ["driving"]
return df
def dat_google_states(df, state):
df.index = pd.to_datetime(df["date"])
return(df[df["sub_region_1"]==state].loc[:,"retail_and_recreation_percent_change_from_baseline":])
facebook_states = ['Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland',
'Sachsen-Anhalt', 'Sachsen', 'Schleswig-Holstein', 'Th-ringen',
'Baden-W-rttemberg', 'Bayern', 'Brandenburg', 'Bremen',
'Hamburg', 'Hessen', 'Niedersachsen']
#, 'Berlin' , 'Mecklenburg-Vorpommern'
rki_states = ['Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland',
'Sachsen-Anhalt', 'Sachsen', 'Schleswig-Holstein', 'Thüringen',
'Baden-Württemberg','Bayern', 'Brandenburg', 'Bremen',
'Hamburg', 'Hessen', 'Niedersachsen']
#, 'Berlin', 'Mecklenburg-Vorpommern'
apple_states = ['North Rhine-Westphalia', 'Rhineland-Palatinate', 'Saarland',
'Saxony-Anhalt', 'Saxony', 'Schleswig-Holstein', 'Thuringia',
'Baden-Württemberg', 'Bavaria', 'Brandenburg', 'Bremen (state)',
'Hamburg', 'Hesse', 'Lower Saxony']
#, 'Berlin', 'Mecklenburg-Vorpommern'
google_states = ['North Rhine-Westphalia', 'Rhineland-Palatinate', 'Saarland',
'Saxony-Anhalt', 'Saxony', 'Schleswig-Holstein', 'Thuringia',
'Baden-Württemberg', 'Bavaria', 'Brandenburg','Bremen',
'Hamburg', 'Hesse', 'Lower Saxony']
#, 'Berlin','Mecklenburg-Vorpommern'
def shift_days(df, days_min=0, days_max=15, drop_cases=True, drop_na=True):
fin=df
for i in range(1+days_min, days_max+1):
if(drop_cases):
df = df.drop('#cases', axis=1)
dfy = df.shift(i)
dfy.columns = [ s + " t-" + str(i) for s in dfy.columns]
fin = pd.concat([fin, dfy], axis=1)
if(drop_na):
fin = fin.dropna()
return(fin)
def join_dat(tiles, apple, google, rki):
df = tiles.join(apple).join(google).join(rki)
df["AnzahlFall"] = df["AnzahlFall"].fillna(0)
return df
def prepare_data(all_sources=True,
states=True,
drop_cases=False,
drop_na=False,
days_max=10,
days_min=0,
drop_today=True):
google = pd.read_csv("../data/Global_Mobility_Report.csv")
apple = pd.read_csv ("../data/applemobilitytrends-2020-06-29.csv")
facebook = pd.read_csv ("../data/movement-range-2020-08-13.txt", "\t")
rki = pd.read_csv ("../data/RKI_COVID19.csv")
dat = []
for i in range(len(facebook_states)):
df = join_dat(dat_facebook_states(facebook, facebook_states[i]),
dat_apple_states(apple, apple_states[i]),
dat_google_states(google, google_states[i]),
dat_rki_states(rki, rki_states[i]))
df.index.name = "date"
nam = ['bing_tiles_visited',
'single_tile_users', 'driving',
'retail_and_recreation',
'grocery_and_pharmacy',
'parks',
'transit_stations',
'workplaces',
'residential', '#cases']
df.columns = nam
df = shift_days(df, days_min=days_min, days_max=days_max, drop_cases=drop_cases, drop_na=drop_na)
if(states):
df["state"]=i
if(drop_today):
dat.append(df.iloc[:,9:len(df.columns)])
else:
dat.append(df)
return(dat)
def train_test_data(dat, diffs=False, testsize=0.2):
iv_train = []
dv_train = []
iv_test = []
dv_test = []
begin = []
for i in range(len(facebook_states)):
begin.append(dat[i]["#cases"][0])
if(diffs):
dat[i]["#cases"] = dat[i]["#cases"].diff()
dat[i] = dat[i].dropna()
X_train, X_test, y_train, y_test = train_test_split(dat[i].drop("#cases", axis=1),
dat[i]["#cases"], test_size=0.2, shuffle=False)
test_date_range = y_test.index
iv_train.append(X_train)
dv_train.append(y_train)
iv_test.append(X_test)
dv_test.append(y_test)
return([iv_train, dv_train, iv_test, dv_test, begin])
def train_model(iv_train, iv_test, dv_train, n_ests=10):
y_train = | pd.concat(dv_train) | pandas.concat |
import pandas as pd
import propylean.properties as prop
from propylean import streams
# _material_stream_equipment_map and _energy_stream_equipment_map are dictionaries of lists
# which store the index and type of the equipment each stream comes from and goes to.
# Structured like {12: [10, CentrifugalPump, 21, PipeSegment],
#                  23: [21, PipeSegment, 36, FlowMeter]}
# where the stream with index 12 has its data in key no. 12:
# that stream is coming from equipment index 10 of type CentrifugalPump and
# going into equipment index 21 of type PipeSegment.
_material_stream_equipment_map = dict()
_energy_stream_equipment_map = dict()
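# Positional layout of each map entry, matching _stream_equipment_mapper below:
#   [from_equipment_index, from_equipment_type, to_equipment_index, to_equipment_type]
# e.g. a stream connected as the outlet of equipment 10 and the inlet of equipment 21
# is stored as {<stream index>: [10, <type of 10>, 21, <type of 21>]}.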
#Defining generic base class for all equipments with one inlet and outlet
class _EquipmentOneInletOutlet:
items = []
def __init__(self, **inputs) -> None:
"""
DESCRIPTION:
Internal base class to define an equipment with one inlet and outlet.
All final classes inherit from this base class.
Read individual final classes for further description.
PARAMETERS:
tag:
Required: No TODO: Make tag as required or randomly generate a tag.
Type: str
Acceptable values: Any string type
Default value: None
Description: Equipment tag the user wants to provide
dynamic_state:
Required: No
Type: bool
Acceptable values: True or False
Default value: False
Description: If equipment is in dynamic state and inventory is changing.
TODO: Provide dynamic simulation capabilities.
inlet_mass_flowrate:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material inlet flowrate to the equipment.
outlet_mass_flowrate:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material outlet flowrate to the equipment.
design_flowrate:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material design flowrate of the equipment.
inlet_pressure:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material inlet pressure to the equipment.
outlet_pressure:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material outlet pressure to the equipment.
design_pressure:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material design pressure of the equipment.
inlet_temperature:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material inlet temperature to the equipment.
outlet_temperature:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material outlet temperature to the equipment.
design_temperature:
Required: No
Type: int or float (recommended)
Acceptable values: Any
Default value: None
Description: Represents material design temperature of the equipment.
RETURN VALUE:
Type: _EquipmentOneInletOutlet
Description: Object of type _EquipmentOneInletOutlet
ERROR RAISED:
Type: Various
Description:
SAMPLE USE CASES:
>>> class NewEquipment(_EquipmentOneInletOutlet):
......
"""
self.tag = None if 'tag' not in inputs else inputs['tag']
self.dynamic_state = False if 'dynamic_state' not in inputs else inputs['dynamic_state']
#Flow properties
self._inlet_mass_flowrate = prop.MassFlowRate() if 'inlet_mass_flowrate' not in inputs else prop.MassFlowRate(inputs['inlet_mass_flowrate'])
self._outlet_mass_flowrate = prop.MassFlowRate() if 'outlet_mass_flowrate' not in inputs else prop.MassFlowRate(inputs['outlet_mass_flowrate'])
# TODO: Design pressure calcs
self.design_flowrate = prop.MassFlowRate() if 'design_flowrate' not in inputs else prop.MassFlowRate(inputs['design_flowrate'])
#Pressure properties
self._inlet_pressure = prop.Pressure() if 'inlet_pressure' not in inputs else prop.Pressure(inputs['inlet_pressure'])
self._outlet_pressure = prop.Pressure() if 'outlet_pressure' not in inputs else prop.Pressure(inputs['outlet_pressure'])
if 'pressure_drop' in inputs:
self.pressure_drop = prop.Pressure(inputs['pressure_drop'])
self.design_pressure = prop.Pressure() if 'design_pressure' not in inputs else prop.Pressure(inputs['design_pressure'])
#Temperature properties
self._inlet_temperature = prop.Temperature() if 'inlet_temperature' not in inputs else prop.Temperature(inputs['inlet_temperature'])
self._outlet_temperature = prop.Temperature() if 'outlet_temperature' not in inputs else prop.Temperature(inputs['outlet_temperature'])
self.design_temperature = prop.Temperature() if 'design_temperature' not in inputs else prop.Temperature(inputs['design_temperature'])
#Inlet and outlet material and energy streams
self._inlet_material_stream_tag = None
self._outlet_material_stream_tag = None
self._inlet_energy_stream_tag = None
self._outlet_energy_stream_tag = None
#Other Properties
self._is_disconnection = False
@property
def inlet_pressure(self):
return self._inlet_pressure
@inlet_pressure.setter
def inlet_pressure(self, value):
if isinstance(value, tuple):
self._inlet_pressure.unit = value[1]
value = value[0]
self._inlet_pressure.value = value
self._outlet_pressure.value = self._inlet_pressure.value - self.pressure_drop
@property
def outlet_pressure(self):
return self._outlet_pressure
@outlet_pressure.setter
def outlet_pressure(self,value):
if isinstance(value, tuple):
self._outlet_pressure.unit = value[1]
value = value[0]
self._outlet_pressure.value = value
self._inlet_pressure.value = self._outlet_pressure.value + self.pressure_drop
@property
def pressure_drop(self):
if (self._inlet_pressure.value == None or
self._outlet_pressure.value == None or
self._inlet_mass_flowrate.value == 0):
value = 0
else:
value = self._inlet_pressure.value - self._outlet_pressure.value
return prop.Pressure(value=value, unit=self._inlet_pressure.unit)
@pressure_drop.setter
def pressure_drop(self, value):
if isinstance(value, tuple):
self._outlet_pressure.unit = value[1]
value = value[0]
if self._inlet_pressure.value != None:
self._outlet_pressure.value = self._inlet_pressure.value - value
elif self._outlet_pressure.value != None:
self._inlet_pressure.value = self._outlet_pressure.value + value
else:
raise Exception("Error! Assign inlet or outlet value before assigning the differential")
@property
def inlet_temperature(self):
return self._inlet_temperature
@inlet_temperature.setter
def inlet_temperature(self, value):
if isinstance(value, tuple):
self._inlet_temperature.unit = value[1]
value = value[0]
self._inlet_temperature.value = value
@property
def outlet_temperature(self):
return self._outlet_temperature
@outlet_temperature.setter
def outlet_temperature(self,value):
if isinstance(value, tuple):
self._outlet_temperature.unit = value[1]
value = value[0]
self._outlet_temperature.value = value
@property
def inlet_mass_flowrate(self):
return self._inlet_mass_flowrate.value
@inlet_mass_flowrate.setter
def inlet_mass_flowrate(self, value):
self._inlet_mass_flowrate.value = value
self._outlet_mass_flowrate = self._inlet_mass_flowrate.value + self.inventory_change_rate
@property
def outlet_mass_flowrate(self):
return self._outlet_mass_flowrate
@outlet_mass_flowrate.setter
def outlet_mass_flowrate(self, value):
self._outlet_mass_flowrate = value
self._inlet_mass_flowrate.value = self._outlet_mass_flowrate - self.inventory_change_rate
@property
def inventory_change_rate(self):
if not self.dynamic_state:
return 0
if (self._inlet_mass_flowrate.value == None or
self._outlet_mass_flowrate == None):
return None
return self._inlet_mass_flowrate.value - self._outlet_mass_flowrate
@inventory_change_rate.setter
def inventory_change_rate(self, value):
if self._inlet_mass_flowrate.value != None:
self._outlet_mass_flowrate = self._inlet_mass_flowrate.value - value
elif self._outlet_mass_flowrate != None:
self._inlet_mass_flowrate.value = self._outlet_mass_flowrate + value
else:
raise Exception("Error! Assign inlet or outlet value before assigning the differential")
@classmethod
def get_equipment_index(cls, tag):
for index, equipment in enumerate(cls.items):
if equipment.tag == tag:
return index
return None
def get_stream_tag(self, stream_type, direction):
"""
DESCRIPTION:
Class method to get stream tag using steam type and the direction.
PARAMETERS:
stream_type:
Required: Yes
Type: str
Acceptable values: 'm', 'mass', 'e', 'energy'
Description: Type of stream user wants to get tag of.
direction:
Required: Yes
Type: str
Acceptable values: 'in', 'out', 'inlet' or 'outlet'
Description: Direction of stream with respect to equipment user wants to get tag of.
RETURN VALUE:
Type: str
Description: Tag value of stream user has assigned to the stream
ERROR RAISED:
Type: General TODO
Description: Raises error if arguments are incorrect
SAMPLE USE CASES:
>>> eq1.get_stream_tag('m', 'out')
>>> eq1.get_stream_tag('energy', 'in')
"""
if stream_type.lower() in ['material', 'mass', 'm']:
stream_tag = [self._inlet_material_stream_tag, self._outlet_material_stream_tag]
elif stream_type.lower() in ['energy', 'power', 'e', 'p']:
stream_tag = [self._inlet_energy_stream_tag, self._outlet_energy_stream_tag]
else:
raise Exception('Incorrect stream_type specified! Provided \"'+stream_type+'\". Can only be "material/mass/m" or "energy/e/power/p"')
if direction.lower() in ['in', 'inlet']:
return stream_tag[0]
elif direction.lower() in ['out', 'outlet']:
return stream_tag[1]
else:
raise Exception('Incorrect direction specified! Provided \"'+direction+'\". Can only be ["in", "out", "inlet", "outlet"]')
def connect_stream(self,
stream_object=None,
direction=None,
stream_tag=None,
stream_type=None):
"""
DESCRIPTION:
Class method to connect a stream object with equiment.
PARAMETERS:
stream_object:
Required: No if stream_tag is provided else Yes
Type: EnergyStream or MaterialStream
Acceptable values: object of specified stream types
Default value: None
Description: Stream object user wants to connect the equipment with.
direction:
Required: Yes for material stream. For energy stream not needed
Type: str
Acceptable values: 'in', 'out', 'inlet' or 'outlet'
Default value: None
Description: Direction in which stream should be with respect to equipment.
stream_tag:
Required: No if stream_object is provided else Yes
Type: str
Acceptable values: stream tag provided by user
Default value: None
Description: Stream object with known stream_tag user wants to connect the equipment with.
stream_type:
Required: No if stream_object provided
Type: str
Acceptable values: 'm', 'mass', 'e', 'energy'
Description: Type of stream user wants to connect.
RETURN VALUE:
Type: bool
Description: True is returned if connection is successful else False
ERROR RAISED:
Type: General
Description: Error raised if arguments are wrong
SAMPLE USE CASES:
>>> eq1.connect_stream(en1)
>>> eq1.connect_stream(direction='out', stream_tag='Pump-outlet', stream_type='m')
"""
if stream_object is not None:
if not (isinstance(stream_object, streams.EnergyStream) or
isinstance(stream_object, streams.MaterialStream)):
raise Exception("Stream object should be of type EnergyStream or MaterialStream not " +
str(type(stream_object)))
stream_tag = stream_object.tag
if isinstance(stream_object, streams.MaterialStream):
stream_type = 'material'
elif isinstance(stream_object, streams.EnergyStream):
stream_type = 'energy'
elif not self._is_disconnection and stream_tag is None:
raise Exception("Either of Stream Object or Stream Tag is required for connection!")
if stream_type.lower() not in ['material', 'mass', 'm', 'energy', 'power', 'e', 'p']:
raise Exception('Incorrect stream_type specified! Provided \"'+stream_type+'\". Can only be "material/mass/m" or "energy/e/power/p"')
if direction.lower() not in ['in', 'inlet', 'out', 'outlet']:
raise Exception('Incorrect direction specified! Provided \"'+direction+'\". Can only be ["in", "out", "inlet", "outlet"]')
stream_index = streams.get_stream_index(stream_tag, stream_type)
is_inlet = True if direction.lower() in ['in', 'inlet'] else False
mapping_result = self._stream_equipment_mapper(stream_index, stream_type, is_inlet)
if self._is_disconnection:
stream_tag = None
self._is_disconnection = False
if stream_type.lower() in ['material', 'mass', 'm']:
if direction.lower() in ['in', 'inlet']:
self._inlet_material_stream_tag = stream_tag
else:
self._outlet_material_stream_tag = stream_tag
else:
if direction.lower() in ['in', 'inlet']:
self._inlet_energy_stream_tag = stream_tag
else:
self._outlet_energy_stream_tag = stream_tag
return mapping_result
def disconnect_stream(self,
stream_object=None,
direction=None,
stream_tag=None,
stream_type=None):
"""
DESCRIPTION:
Class method to disconnect a stream object from equiment.
PARAMETERS:
stream_object:
Required: No if stream_tag is provided else Yes
Type: EnergyStream or MaterialStream
Acceptable values: object of specified stream types
Default value: None
Description: Stream object user wants to disconnect the equipment with.
direction:
Required: Yes if stream_object or stream_tag not provided
Type: str
Acceptable values: 'in', 'out', 'inlet' or 'outlet'
Default value: None
Description: Direction in which stream should be with respect to equipment.
stream_tag:
Required: No if stream_object is provided else Yes
Type: str
Acceptable values: stream tag provided by user
Default value: None
Description: Stream object with known stream_tag user wants to disconnect the equipment from.
stream_type:
Required: No if stream_object provided
Type: str
Acceptable values: 'm', 'mass', 'e', 'energy'
Description: Type of stream user wants to disconnect.
RETURN VALUE:
Type: bool
Description: True is returned if connection is successful else False
ERROR RAISED:
Type: General
Description: Error raised if arguments are wrong
SAMPLE USE CASES:
>>> eq1.disconnect_stream(s1)
>>> eq1.disconnect_stream(stream_tag='Pump-outlet')
>>> eq1.disconnect_stream(direction='in', stream_type="energy")
"""
def define_index_direction(tag):
if tag == self._inlet_material_stream_tag:
stream_type = "material"
direction = "in"
elif tag == self._outlet_material_stream_tag:
stream_type = "material"
direction = "out"
elif tag == self._inlet_energy_stream_tag:
stream_type = "energy"
direction = "in"
elif tag == self._outlet_energy_stream_tag:
stream_type = "energy"
direction = "out"
return stream_type, direction
if stream_object is not None:
stream_type, direction = define_index_direction(stream_object.tag)
elif stream_tag is not None:
stream_type, direction = define_index_direction(stream_tag)
elif (direction is not None and
stream_type is not None):
stream_tag = self.get_stream_tag(stream_type, direction)
stream_type, direction = define_index_direction(stream_tag)
else:
raise Exception("To disconnect stream from equipment, provide either just connected stream object or \
just stream tag or just direction & stream type")
self._is_disconnection = True
return self.connect_stream(stream_object,
direction,
stream_tag,
stream_type)
def _stream_equipment_mapper(self, stream_index, stream_type, is_inlet):
if stream_index is None or isinstance(stream_index, list):
return False
e_type, e_index = (3, 2) if is_inlet else (1, 0)
global _material_stream_equipment_map
global _energy_stream_equipment_map
stream_equipment_map = _material_stream_equipment_map if stream_type.lower() in ['material', 'mass', 'm'] else _energy_stream_equipment_map
equipment_type = type(self)
equipment_index = self.get_equipment_index(self.tag)
def set_type_index():
old_equipment_type = stream_equipment_map[stream_index][e_type]
old_equipment_index = stream_equipment_map[stream_index][e_index]
stream_equipment_map[stream_index][e_type] = equipment_type if not self._is_disconnection else None
stream_equipment_map[stream_index][e_index] = equipment_index if not self._is_disconnection else None
if (old_equipment_index is not None
and old_equipment_type is not None):
old_equipment_obj = old_equipment_type.list_objects()[old_equipment_index]
old_equipment_obj.disconnect_stream(stream_type, 'in' if is_inlet else 'out')
raise Warning("Equipment type " + old_equipment_type +
" with tag " + old_equipment_obj.tag +
" was disconnected from stream type " + stream_type +
" with tag " + self.get_stream_tag(stream_type,
'in' if is_inlet else 'out'))
try:
set_type_index()
except:
try:
stream_equipment_map[stream_index] = [None, None, None, None]
set_type_index()
except Exception as e:
raise Exception("Error occured in equipment-stream mapping:", e)
if stream_type.lower() in ['material', 'mass', 'm']:
_material_stream_equipment_map = stream_equipment_map
else:
_energy_stream_equipment_map = stream_equipment_map
return True
#Defining generic base class for all equipments with multiple inlet and outlet. TO BE UPDATED!!!!!!
class _EquipmentMultipleInletOutlet:
def __init__(self) -> None:
self._inlet_pressure.value = list()
#Defining generic class for all types of pressure changers like Pumps, Compressors and Expanders
class _PressureChangers(_EquipmentOneInletOutlet):
def __init__(self,**inputs) -> None:
self._differential_pressure = prop.Pressure()
if 'pressure_drop' in inputs:
inputs['differential_pressure'] = -1 * inputs['pressure_drop']
del inputs['pressure_drop']
if 'inlet_pressure' in inputs:
inputs['suction_pressure'] = inputs['inlet_pressure']
del inputs['inlet_pressure']
if 'outlet_pressure' in inputs:
inputs['discharge_pressure'] = inputs['outlet_pressure']
del inputs['outlet_pressure']
super().__init__(**inputs)
if 'suction_pressure' in inputs:
self._inlet_pressure.value = inputs['suction_pressure']
if ('differential_pressure' in inputs and 'performance_curve' in inputs or
'differential_pressure' in inputs and 'discharge_pressure' in inputs or
'performance_curve' in inputs and 'discharge_pressure' in inputs):
raise Exception('Please input only one of discharge_pressure, differential_pressure or performance_curve \
with suction pressure')
if 'discharge_pressure' in inputs:
if (self.suction_pressure != None and
'differential_pressure' in inputs):
raise Exception("Please enter either one of discharge_pressure or differential_pressure")
self._outlet_pressure.value = inputs['discharge_pressure']
if 'differential_pressure' in inputs:
if ((self.suction_pressure != None or self.discharge_pressure != None) and
'performance_curve' in inputs):
raise Exception('Please input only one of differential pressure or performance_curve')
self.differential_pressure = inputs['differential_pressure']
self._performance_curve = | pd.DataFrame() | pandas.DataFrame |
"""dynamic user-input-responsive part of mood, and mood graphs"""
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.signal import lsim, lti
from scipy.signal.ltisys import StateSpaceContinuous
from tqdm.autonotebook import tqdm
from IPython.display import display
from persistence.response_cache import (
ResponseCache,
UserInputIdentifier,
)
from feels.mood import (
random_mood_at_pst_datetime,
logit_diff_to_pos_sent,
pos_sent_to_logit_diff,
)
from util.past import MILESTONE_TIMES
from util.times import now_pst, fromtimestamp_pst
MOOD_IMAGE_DIR = "data/mood_images/"
STEP_SEC = 30 * 1
TAU_SEC = 3600 * 12
TAU_SEC_2ND = 60 * 60
WEIGHTED_AVG_START_TIME = pd.Timestamp("2021-01-04 09:10:00")
WEIGHTED_AVG_P75_WEIGHT = 0.5
RESPONSE_SCALE_BASE = 0.15 # 0.1 # 0.2 #0.5
DETERMINER_CENTER = -3.1 # -2.4 # -1.5 #-2
DETERMINER_CENTER_UPDATES = {
pd.Timestamp("2020-08-20 01:00:00"): -2.4,
pd.Timestamp("2020-08-25 14:00:00"): -2.0,
pd.Timestamp("2020-08-31 09:15:00"): -2.4,
pd.Timestamp("2020-09-16 06:00:00"): -2.1,
pd.Timestamp("2020-10-28 17:00:00"): -2.4,
pd.Timestamp("2020-11-04 11:00:00"): -2.78,
pd.Timestamp("2020-11-13 19:00:00"): -2.7,
pd.Timestamp("2020-11-15 07:30:00"): -2.6,
pd.Timestamp("2020-12-04 07:00:00"): -2.5,
pd.Timestamp("2020-12-10 08:35:00"): -2.35,
pd.Timestamp("2020-12-10 23:45:00"): -2.0,
pd.Timestamp("2020-12-18 15:35:00"): -2.2,
pd.Timestamp("2020-12-21 15:25:00"): -2.3,
WEIGHTED_AVG_START_TIME: 0.0,
pd.Timestamp("2021-02-08 09:25:00"): -0.25,
pd.Timestamp("2021-02-14 17:55:00"): -0.125,
pd.Timestamp("2021-02-15 17:25:00"): 0,
pd.Timestamp("2021-02-16 17:45:00"): 0.5,
pd.Timestamp("2021-02-17 12:45:00"): 0,
pd.Timestamp("2021-02-26 17:30:00"): 0.5,
pd.Timestamp("2021-02-27 16:05:00"): 0.,
pd.Timestamp("2021-03-15 09:55:00"): -0.2,
pd.Timestamp("2021-03-15 19:50:00"): -0.4,
pd.Timestamp("2021-03-20 06:55:00"): 0.,
pd.Timestamp("2021-03-24 22:40:00"): -0.3,
pd.Timestamp("2021-03-31 12:25:00"): -0.5,
pd.Timestamp("2021-04-09 07:10:00"): -0.25,
pd.Timestamp("2021-05-05 17:00:00"): 0.,
pd.Timestamp("2021-05-07 18:15:00"): -0.25,
pd.Timestamp("2021-05-12 07:50:00"): 0.,
pd.Timestamp("2021-05-22 09:50:00"): -0.125,
pd.Timestamp("2021-05-23 07:15:00"): -0.25,
pd.Timestamp("2021-06-05 12:05:00"): -0.5,
pd.Timestamp("2021-06-07 22:35:00"): -0.3,
pd.Timestamp("2021-06-08 13:15:00"): 0.,
pd.Timestamp("2021-06-14 06:55:00"): -0.25,
pd.Timestamp("2021-06-15 18:08:00"): 0.,
pd.Timestamp("2021-06-16 13:00:00"): 0.125,
pd.Timestamp("2021-06-26 07:35:00"): 0.25,
pd.Timestamp("2021-06-30 08:40:00"): 0.,
pd.Timestamp("2021-08-06 00:45:00"): -0.125,
pd.Timestamp("2021-09-21 08:25:00"): 0.,
pd.Timestamp("2021-09-22 17:45:00"): -0.075,
pd.Timestamp("2021-10-24 12:15:00"): -0.,
pd.Timestamp("2021-10-24 08:40:00"): 0.125,
pd.Timestamp("2021-10-25 17:55:00"): 0.25,
pd.Timestamp("2021-10-28 22:40:00"): 0.125,
pd.Timestamp("2021-10-31 18:10:00"): 0.05,
pd.Timestamp("2021-11-02 20:40:00"): 0.,
pd.Timestamp("2021-11-15 19:20:00"): 0.05,
pd.Timestamp("2021-11-17 09:10:00"): 0.1,
pd.Timestamp("2021-11-19 14:50:00"): 0.,
pd.Timestamp("2021-12-24 14:45:00"): 0.1,
pd.Timestamp("2021-12-30 09:55:00"): 0.05,
}
DETERMINER_MULTIPLIER_UPDATES = {
pd.Timestamp("2020-08-25 17:00:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-10-21 21:15:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-16 10:45:00"): 0.0667 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-25 11:30:00"): 0.1 / RESPONSE_SCALE_BASE,
| pd.Timestamp("2020-11-27 08:55:00") | pandas.Timestamp |
"""
"Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
"""
import os
from timeit import default_timer as timer
from datetime import datetime
from functools import reduce
import pandas as pd
import src.common as common
import src.config.constants as constants
import src.munging as process_data
import src.modeling as model
from sklearn.model_selection import KFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import LogisticRegression
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 42
EXP_DETAILS = "Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
IS_TEST = False
PLOT_FEATURE_IMPORTANCE = False
TARGET = "claim"
MODEL_TYPE = "Ranking"
LOGGER_NAME = "ranking"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "metric", "roc_auc")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger, constants.PROCESSED_DATA_DIR, train=True, test=True, sample_submission=True
)
# Read different submission files and merge them to create dataset
# for level 2
sub_1_predition_name = (
"sub_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.gz"
)
sub_1_oof_name = (
"oof_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.csv"
)
sub_1_test_pred = | pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_1_predition_name}") | pandas.read_csv |
import altair as alt
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from mplaltair import convert
from .._axis import convert_axis
from ..parse_chart import ChartMetadata
import pytest
df_quant = pd.DataFrame({
"a": [1, 2, 3], "b": [1.2, 2.4, 3.8], "c": [7, 5, -3],
"s": [50, 100, 200.0], "alpha": [0, .5, .8], "shape": [1, 2, 3], "fill": [1, 2, 3],
"neg": [-3, -4, -5], 'log': [11, 100, 1000], 'log2': [1, 3, 5],
"years": | pd.to_datetime(['1/1/2015', '1/1/2016', '1/1/2017']) | pandas.to_datetime |
"""
Summary: Pandas extension for converting 15-character Salesforce IDs to 18-character Salesforce IDs
Date: 2020-10-12
Contributor(s):
<NAME>
"""
from functools import lru_cache
from pandas import DataFrame
from pandas.api.extensions import register_series_accessor
@register_series_accessor("sf")
class PandasSalesforceIdConverter:
"""Salesforce ID converter extension for Pandas Series.
(Note: This should be applied to a series (column) and not the entire dataframe.)
Example:
>>>df = DataFrame({"sfid": ["a0r90000008cJza"], "expected": ["a0r90000008cJzaAAE"]})
>>>df.loc[:, "actual"] = df["sfid"].sf.convert
>>>(df["actual"] == df["expected"]).all()
True
"""
# Class variables
__ASCII_UPPERCASE = "".join([chr(i) for i in range(65, 91)])
# All alphabetic uppercase letters and numbers 0 - 5.
CHARS = __ASCII_UPPERCASE + "012345"
def __init__(self, pandas_object):
self.__obj = pandas_object
@staticmethod
def __get_bin_list(string):
"""Checks for uppercase letters within ID. Tags 1 if found, 0 otherwise.
>>>self.__get_bin_list("ABc")
[1, 1, 0]
"""
return [1 if str(c).isupper() else 0 for c in string]
@lru_cache(maxsize=1000)
def __convert_id(self, value):
"""Convert a 15-character Salesforce ID to its 18-character version."""
# If value is not alphanumeric.
if not value.isalnum():
return ""
# If character count of value not equal to 15, return value.
elif len(value) != 15:
return value
# if alphanumeric and 15 characters long.
else:
calculated_chars: str = str()
chunks: list = [value[i:i+5] for i in range(0, 15, 5)]
for chunk in chunks:
bin_list = self.__get_bin_list(chunk)
idx = self.decoder(bin_list)
calculated_chars += self.CHARS[idx]
return f"{value}{calculated_chars}"
@staticmethod
def decoder(binary_list):
"""Return decimal value from collection of binary values.
>>>self.decoder([1, 1, 0])
3
"""
list_len = len(binary_list)
return sum([2**i if binary_list[i] == 1 else 0 for i in range(list_len)])
@property
def convert(self):
"""Run __convert_id() method to each row of Series."""
return self.__obj.apply(self.__convert_id)
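# Worked example for the first test row below: "a0r90000008cJza" splits into the
# 5-character chunks "a0r90", "00000" and "8cJza". Their uppercase masks are
# [0, 0, 0, 0, 0], [0, 0, 0, 0, 0] and [0, 0, 1, 0, 0], which decode to 0, 0 and 4,
# so CHARS[0], CHARS[0], CHARS[4] append the suffix "AAE" -> "a0r90000008cJzaAAE".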
# Tests
id_test_dict = dict(
sfid = ["a0r90000008cJza", "aCQR000000018HT", "aCQR000000018HYOAY"],
expected = ["a0r90000008cJzaAAE", "aCQR000000018HTOAY", "aCQR000000018HYOAY"],
)
df1 = | DataFrame(id_test_dict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 08:55:13 2018
@author: <NAME>
"""
import pandas as pd
import math
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
import numpy as np
#%%
data = pd.read_csv("D:\\Kaggle\\NYTaxi\\Data\\sample500.csv")
data.isnull().sum()
data = data.dropna(how = 'any', axis = 'rows')
#%%
data["pickup"] = pd.to_datetime(data["pickup_datetime"])
data["Day"] = data["pickup"].dt.weekday_name
data["Month"] = data["pickup"].dt.month
data["Hour"] = data["pickup"].dt.hour
data["lat_diff"] = (data["pickup_latitude"] - data["dropoff_latitude"]).abs()
data["long_diff"] = (data["pickup_longitude"] - data["dropoff_longitude"]).abs()
data = data.drop(data[data["lat_diff"] == 0].index)
for elem in data["Day"].unique():
data[str(elem)] = (data["Day"] == elem)*1
for elem in data["Month"].unique():
data["Month" + str(elem)] = (data["Month"] == elem)*1
for elem in data["Hour"].unique():
data["Hour" + str(elem)] = (data["Hour"] == elem)*1
#%%
for_regression = data.drop(columns = ["key", "pickup", "Day", "Month", "pickup_latitude",
"dropoff_latitude", "pickup_longitude", "dropoff_longitude", "pickup_datetime",
"Monday", "Month1", "Hour", "Hour0"])
for_regression.to_csv("D:\\Kaggle\\NYTaxi\\Data\\for_regression.csv", index = False)
fitToPCA = for_regression.drop(columns = ["fare_amount"])
pca = PCA(n_components = 43)
PrincipleComponents = pca.fit_transform(fitToPCA)
variance = pca.explained_variance_ratio_
variance_ratio = np.cumsum(np.round(variance, decimals=10)*100)
pca_df = | pd.DataFrame(PrincipleComponents) | pandas.DataFrame |
from tempfile import NamedTemporaryFile
import numpy.testing
import pandas
import pytest
from conftest import ODC_VERSION, codc, odc_modules
SAMPLE_DATA = {
"col1": [1, 2, 3, 4, 5, 6, 7],
"col2": [0, 0, 0, 0, 0, 0, 0],
"col3": [73] * 7,
"col4": [1.432] * 7,
"col5": [-17, -7, -7, None, 1, 4, 4],
"col6": ["aoeu", "aoeu", "aaaaaaaooooooo", "None", "boo", "squiggle", "a"],
"col7": ["abcd"] * 7,
"col8": [2.345] * 7,
"col9": [999.99, 888.88, 777.77, 666.66, 555.55, 444.44, 333.33],
"col10": [999.99, 888.88, 777.77, 666.66, 555.55, 444.44, 333.33],
"col11": [1, None, 3, 4, 5, None, 7],
"col12": [-512, None, 3, 7623, -22000, None, 7],
"col13": [-1234567, 8765432, None, 22, 22222222, -81222323, None],
# 'col21': [None] * 7
}
SAMPLE_PROPERTIES = {
"property1": "this is a string ....",
"property2": ".......and another .......",
}
def encode_sample(odyssey, f):
df = pandas.DataFrame(SAMPLE_DATA)
types = {
"col8": odyssey.REAL,
"col10": odyssey.REAL,
# 'col21': odyssey.REAL
}
properties = SAMPLE_PROPERTIES
odyssey.encode_odb(df, f, types=types, rows_per_frame=4, properties=properties)
if not isinstance(f, str):
f.flush()
return df
@pytest.mark.parametrize("odyssey", odc_modules)
def test_encode_decode_filename(odyssey):
with NamedTemporaryFile() as fencode:
df = encode_sample(odyssey, fencode)
df2 = odyssey.read_odb(fencode.name, single=True)
assert isinstance(df2, pandas.DataFrame)
for col in df.keys():
s1 = df[col]
s2 = df2[col]
if col in ("col8", "col10"):
numpy.testing.assert_array_almost_equal(s1, s2, decimal=2)
else:
numpy.testing.assert_array_equal(s1, s2)
@pytest.mark.parametrize("odyssey", odc_modules)
def test_encode_decode_file_object(odyssey):
with NamedTemporaryFile() as fencode:
df = encode_sample(odyssey, fencode)
with open(fencode.name, "rb") as fread:
df2 = odyssey.read_odb(fread, single=True)
assert isinstance(df2, pandas.DataFrame)
for col in df.keys():
s1 = df[col]
s2 = df2[col]
if col in ("col8", "col10"):
numpy.testing.assert_array_almost_equal(s1, s2, decimal=2)
else:
numpy.testing.assert_array_equal(s1, s2)
@pytest.mark.parametrize("odyssey", odc_modules)
def test_encode_decode_simple_columns(odyssey):
with NamedTemporaryFile() as fencode:
df = encode_sample(odyssey, fencode)
cols = ("col6", "col7")
df2 = odyssey.read_odb(fencode.name, columns=cols, single=True)
assert isinstance(df2, pandas.DataFrame)
assert df2.shape[1] == len(cols)
for col in cols:
numpy.testing.assert_array_equal(df[col], df2[col])
@pytest.mark.parametrize("odyssey", odc_modules)
def test_aggregate_non_matching(odyssey):
"""
Where we aggregate tables with non-matching columns, ensure that the infilled
missing values are type appropriate
"""
sample1 = {"col1": [111, 222, 333]}
sample2 = {"col2": ["aaa", "bbb", "ccc"]}
with NamedTemporaryFile() as fencode:
odyssey.encode_odb(pandas.DataFrame(sample1), fencode)
odyssey.encode_odb(pandas.DataFrame(sample2), fencode)
fencode.flush()
df = odyssey.read_odb(fencode.name, single=True)
assert isinstance(df, pandas.DataFrame)
assert df["col1"].dtype == "float64"
assert df["col2"].dtype == "object"
assert df["col2"][0] is None
numpy.testing.assert_array_equal(df["col1"], [111.0, 222.0, 333.0, numpy.nan, numpy.nan, numpy.nan])
numpy.testing.assert_array_equal(df["col2"], [None, None, None, "aaa", "bbb", "ccc"])
@pytest.mark.parametrize("odyssey", odc_modules)
def test_unqualified_names(odyssey):
"""
Check that we can extract columns by unqualified name, and by fully qualified name
(that is, where the name contains an '@', we can extract by short name
"""
sample = {
"col1@tbl1": [11, 12, 13, 14, 15, 16],
"col2@tbl1": [21, 22, 23, 24, 25, 26],
"col1@tbl2": [31, 32, 33, 34, 35, 36],
"col3@tbl2": [41, 42, 43, 44, 45, 46],
"col4": [51, 52, 53, 54, 55, 56],
}
    input_df = pandas.DataFrame(sample)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 14:24:40 2019
@author: ziskin
"""
# from pathlib import Path
from PW_paths import work_yuval
sound_path = work_yuval / 'sounding'
era5_path = work_yuval / 'ERA5'
edt_path = sound_path / 'edt'
ceil_path = work_yuval / 'ceilometers'
des_path = work_yuval / 'deserve'
def load_field_from_radiosonde(
path=sound_path, field='Tm', data_type='phys', reduce='min',
dim='time', plot=True):
"""data_type: phys for 2008-2013, 10 sec sample rate,
PTU_Wind for 2014-2016 2 sec sample rate,
edt for 2018-2019 1 sec sample rate with gps"""
from aux_gps import plot_tmseries_xarray
from aux_gps import path_glob
import xarray as xr
def reduce_da(da):
if reduce is not None:
if reduce == 'min':
da = da.min(dim)
elif reduce == 'max':
da = da.max(dim)
da = da.reset_coords(drop=True)
return da
if data_type is not None:
file = path_glob(
path, 'bet_dagan_{}_sounding_*.nc'.format(data_type))[-1]
da = xr.open_dataset(file)[field]
da = da.sortby('sound_time')
da = reduce_da(da)
else:
files = path_glob(path, 'bet_dagan_*_sounding_*.nc')
assert len(files) == 3
ds = [xr.open_dataset(x)[field] for x in files]
da = xr.concat(ds, 'sound_time')
da = da.sortby('sound_time')
da = reduce_da(da)
if plot:
plot_tmseries_xarray(da)
return da
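# Hedged usage sketch (the presence of the yearly netCDF files under
# sound_path is an assumption): load the per-launch minimum Tm from the
# 1-second "edt" soundings without plotting, e.g.
#
#   tm = load_field_from_radiosonde(field='Tm', data_type='edt',
#                                   reduce='min', plot=False)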
def get_field_from_radiosonde(path=sound_path, field='Tm', data_type='phys',
reduce='min', dim='time',
times=['2007', '2019'], plot=True):
"""
old version, to be replaced with load_field_from_radiosonde,
but still useful for ZWD
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is sound_path.
field : TYPE, optional
DESCRIPTION. The default is 'Tm'.
data_type : TYPE, optional
DESCRIPTION. The default is 'phys'.
reduce : TYPE, optional
DESCRIPTION. The default is 'min'.
dim : TYPE, optional
DESCRIPTION. The default is 'time'.
times : TYPE, optional
DESCRIPTION. The default is ['2007', '2019'].
plot : TYPE, optional
DESCRIPTION. The default is True.
Returns
-------
da : TYPE
DESCRIPTION.
"""
import xarray as xr
from aux_gps import get_unique_index
from aux_gps import keep_iqr
from aux_gps import plot_tmseries_xarray
from aux_gps import path_glob
file = path_glob(path, 'bet_dagan_{}_sounding_*.nc'.format(data_type))[0]
file = path / 'bet_dagan_phys_PW_Tm_Ts_2007-2019.nc'
ds = xr.open_dataset(file)
if field is not None:
da = ds[field]
if reduce is not None:
if reduce == 'min':
da = da.min(dim)
elif reduce == 'max':
da = da.max(dim)
da = da.reset_coords(drop=True)
da = get_unique_index(da, dim='sound_time')
da = keep_iqr(da, k=2.0, dim='sound_time', drop_with_freq='12H')
da = da.sel(sound_time=slice(*times))
if plot:
plot_tmseries_xarray(da)
return da
def calculate_edt_north_east_distance(lat_da, lon_da, method='fast'):
"""fast mode is 11 times faster than slow mode, however fast distance is
larger than slow...solve this mystery"""
from shapely.geometry import Point
from pyproj import Transformer
import geopandas as gpd
import pandas as pd
import numpy as np
def change_sign(x, y, value):
if x <= y:
return -value
else:
return value
if method == 'fast':
# prepare bet dagan coords:
bd_lat = 32.01
bd_lon = 34.81
fixed_lat = np.ones(lat_da.shape) * bd_lat
fixed_lon = np.ones(lon_da.shape) * bd_lon
# define projections:
# wgs84 = pyproj.CRS('EPSG:4326')
# isr_tm = pyproj.CRS('EPSG:2039')
        # create transform from wgs84 (lon, lat) to the new Israeli grid (meters):
        # transformer = Transformer.from_crs(wgs84, isr_tm, always_xy=True)
        transformer = Transformer.from_proj(4326, 2039, always_xy=True)
        # with always_xy=True the transformer expects (lon, lat) order:
        bd_meters = transformer.transform(bd_lon, bd_lat)
bd_point_meters = Point(bd_meters[0], bd_meters[1])
# # create Points from lat_da, lon_da in wgs84:
# dyn_lat = [Point(x, bd_lon) for x in lat_da.values[::2]]
# dyn_lon = [Point(bd_lat, x) for x in lon_da.values[::2]]
        # transform to meters (again in (lon, lat) order because of always_xy=True):
        dyn_lat_meters = transformer.transform(fixed_lon, lat_da.values)
        dyn_lon_meters = transformer.transform(lon_da.values, fixed_lat)
# calculate distance in km:
north_distance = [Point(dyn_lat_meters[0][x],dyn_lat_meters[1][x]).distance(bd_point_meters) / 1000 for x in range(lat_da.size)]
east_distance = [Point(dyn_lon_meters[0][x],dyn_lon_meters[1][x]).distance(bd_point_meters) / 1000 for x in range(lon_da.size)]
# sign change:
new_north_distance = [change_sign(lat_da.values[x], bd_lat, north_distance[x]) for x in range(lat_da.size)]
new_east_distance = [change_sign(lon_da.values[x], bd_lon, east_distance[x]) for x in range(lon_da.size)]
north = lat_da.copy(data=new_north_distance)
north.attrs['units'] = 'km'
north.attrs['long_name'] = 'distance north'
east = lon_da.copy(data=new_east_distance)
east.attrs['long_name'] = 'distance east'
east.attrs['units'] = 'km'
return north, east
elif method == 'slow':
bet_dagan = pd.DataFrame(index=[0])
bet_dagan['x'] = 34.81
bet_dagan['y'] = 32.01
bet_dagan_gdf = gpd.GeoDataFrame(
bet_dagan, geometry=gpd.points_from_xy(
bet_dagan['x'], bet_dagan['y']))
bet_dagan_gdf.crs = {'init': 'epsg:4326'}
# transform to israeli meters coords:
bet_dagan_gdf.to_crs(epsg=2039, inplace=True)
bd_as_point = bet_dagan_gdf.geometry[0]
bd_lon = bet_dagan.loc[0, 'x']
bd_lat = bet_dagan.loc[0, 'y']
df = lat_da.reset_coords(drop=True).to_dataframe(name='lat')
df['lon'] = lon_da.reset_coords(drop=True).to_dataframe()
# df = ds.reset_coords(drop=True).to_dataframe()
df['fixed_lon'] = 34.81 * np.ones(df['lon'].shape)
df['fixed_lat'] = 32.01 * np.ones(df['lat'].shape)
gdf_fixed_lon = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df['fixed_lon'],
df.lat))
gdf_fixed_lon.crs = {'init': 'epsg:4326'}
gdf_fixed_lon.dropna(inplace=True)
gdf_fixed_lon.to_crs(epsg=2039, inplace=True)
gdf_fixed_lat = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df['fixed_lat']))
gdf_fixed_lat.crs = {'init': 'epsg:4326'}
gdf_fixed_lat.dropna(inplace=True)
gdf_fixed_lat.to_crs(epsg=2039, inplace=True)
# calculate distance north from bet dagan coords in km:
df['north_distance'] = gdf_fixed_lon.geometry.distance(
bd_as_point) / 1000.0
# calculate distance east from bet dagan coords in km:
df['east_distance'] = gdf_fixed_lat.geometry.distance(
bd_as_point) / 1000.0
        # fix sign to indicate: negative = south:
df['north_distance'] = df.apply(
lambda x: change_sign(
x.lat, bd_lat, x.north_distance), axis=1)
        # fix sign to indicate: negative = west:
df['east_distance'] = df.apply(
lambda x: change_sign(
x.lon, bd_lon, x.east_distance), axis=1)
return df['north_distance'].to_xarray(), df['east_distance'].to_xarray()
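# A minimal sketch (an illustrative helper, not called anywhere) of the axis
# order assumed above: with always_xy=True the transformer expects
# (lon, lat) == (x, y), so Bet-Dagan at (34.81E, 32.01N) is passed longitude
# first when converting WGS84 to the Israeli TM grid (EPSG:2039).
def _example_bet_dagan_to_itm():
    from pyproj import Transformer
    transformer = Transformer.from_proj(4326, 2039, always_xy=True)
    east, north = transformer.transform(34.81, 32.01)  # (lon, lat) order
    return east, north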
#def produce_radiosonde_edt_north_east_distance(path=sound_path, savepath=None,
# verbose=True):
# from aux_gps import path_glob
# import geopandas as gpd
# import pandas as pd
# import xarray as xr
# import numpy as np
#
# def change_sign(x, y, value):
# if x <= y:
# return -value
# else:
# return value
# file = path_glob(path, 'bet_dagan_edt_sounding_*.nc')
# ds = xr.load_dataset(file[0])
# ds_geo = ds[['lat', 'lon']]
# # prepare bet dagan coords:
# bet_dagan = pd.DataFrame(index=[0])
# bet_dagan['x'] = 34.81
# bet_dagan['y'] = 32.01
# bet_dagan_gdf = gpd.GeoDataFrame(
# bet_dagan, geometry=gpd.points_from_xy(
# bet_dagan['x'], bet_dagan['y']))
# bet_dagan_gdf.crs = {'init': 'epsg:4326'}
# # transform to israeli meters coords:
# bet_dagan_gdf.to_crs(epsg=2039, inplace=True)
# bd_as_point = bet_dagan_gdf.geometry[0]
# bd_lon = bet_dagan.loc[0, 'x']
# bd_lat = bet_dagan.loc[0, 'y']
# ds_list = []
# for i in range(ds['sound_time'].size):
# record = ds['sound_time'].isel({'sound_time': i})
# record = record.dt.strftime('%Y-%m-%d %H:%M').values.item()
# if verbose:
# print('processing {}.'.format(record))
# sounding = ds_geo.isel({'sound_time': i})
# df = sounding.reset_coords(drop=True).to_dataframe()
# df['fixed_lon'] = 34.81 * np.ones(df['lon'].shape)
# df['fixed_lat'] = 32.01 * np.ones(df['lat'].shape)
# gdf_fixed_lon = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df['fixed_lon'],
# df.lat))
# gdf_fixed_lon.crs = {'init': 'epsg:4326'}
# gdf_fixed_lon.dropna(inplace=True)
# gdf_fixed_lon.to_crs(epsg=2039, inplace=True)
# gdf_fixed_lat = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
# df['fixed_lat']))
# gdf_fixed_lat.crs = {'init': 'epsg:4326'}
# gdf_fixed_lat.dropna(inplace=True)
# gdf_fixed_lat.to_crs(epsg=2039, inplace=True)
# # calculate distance north from bet dagan coords in km:
# df['north_distance'] = gdf_fixed_lon.geometry.distance(
# bd_as_point) / 1000.0
# # calculate distance east from bet dagan coords in km:
# df['east_distance'] = gdf_fixed_lat.geometry.distance(
# bd_as_point) / 1000.0
# # fix sign to indicate: negtive = south:
# df['north_distance'] = df.apply(
# lambda x: change_sign(
# x.lat, bd_lat, x.north_distance), axis=1)
# # fix sign to indicate: negtive = east:
# df['east_distance'] = df.apply(
# lambda x: change_sign(
# x.lon, bd_lon, x.east_distance), axis=1)
# # convert to xarray:
# ds_list.append(df[['east_distance', 'north_distance']].to_xarray())
# ds_distance = xr.concat(ds_list, 'sound_time')
# ds_distance['sound_time'] = ds['sound_time']
# ds_distance.to_netcdf(savepath / 'bet_dagan_edt_distance.nc', 'w')
# return ds_distance
def analyse_radiosonde_climatology(path=sound_path, data_type='phys',
field='Rho_wv', month=3, season=None,
times=None, hour=None):
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, LogLocator, NullFormatter
from aux_gps import path_glob
file = path_glob(path, 'bet_dagan_{}_sounding_*.nc'.format(data_type))
da = xr.load_dataset(file[0])[field]
if times is not None:
da = da.sel(sound_time=slice(*times))
if hour is not None:
da = da.sel(sound_time=da['sound_time.hour'] == hour)
try:
name = da.attrs['long_name']
except KeyError:
name = field
units = da.attrs['units']
if season is not None:
clim = da.groupby('sound_time.season').mean('sound_time')
df = clim.to_dataset('season').to_dataframe()
df_copy = df.copy()
seasons = [x for x in df.columns if season not in x]
df.reset_index(inplace=True)
# df.loc[:, season] -= df.loc[:, season]
df.loc[:, seasons[0]] -= df.loc[:, season]
df.loc[:, seasons[1]] -= df.loc[:, season]
df.loc[:, seasons[2]] -= df.loc[:, season]
fig, ax = plt.subplots(figsize=(12, 5))
df.plot(x=seasons[0], y='Height', logy=True, color='r', ax=ax)
df.plot(x=seasons[1], y='Height', logy=True, ax=ax, color='b')
df.plot(x=seasons[2], y='Height', logy=True, ax=ax, color='g')
ax.axvline(x=0, color='k')
# ax.set_xlim(-1, 15)
ax.legend(seasons+[season], loc='best')
else:
clim = da.groupby('sound_time.month').mean('sound_time')
if month == 12:
months = [11, 12, 1]
elif month == 1:
months = [12, 1, 2]
else:
months = [month - 1, month, month + 1]
df_copy = clim.to_dataset('month').to_dataframe()
df = clim.sel(month=months).to_dataset('month').to_dataframe()
month_names = pd.to_datetime(months, format='%m').month_name()
df.reset_index(inplace=True)
df.loc[:, months[0]] -= df.loc[:, month]
df.loc[:, months[2]] -= df.loc[:, month]
df.loc[:, months[1]] -= df.loc[:, month]
ax = df.plot(x=months[0], y='Height', logy=True, color='r')
df.plot(x=months[1], y='Height', logy=True, ax=ax, color='k')
df.plot(x=months[2], y='Height', logy=True, ax=ax, color='b')
ax.legend(month_names)
ax.set_xlabel('{} [{}]'.format(name, units))
ax.set_ylim(100, 10000)
ax.get_yaxis().set_major_formatter(ScalarFormatter())
locmaj = LogLocator(base=10,numticks=12)
ax.yaxis.set_major_locator(locmaj)
locmin = LogLocator(base=10.0,subs=(0.2,0.4,0.6,0.8),numticks=12)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(NullFormatter())
ax.set_ylabel('height [m]')
if hour is not None:
ax.set_title('hour = {}'.format(hour))
return df_copy
def process_new_field_from_radiosonde_data(phys_ds, dim='sound_time',
field_name='pw', bottom=None,
top=None, verbose=False):
import xarray as xr
from aux_gps import keep_iqr
field_list = []
for i in range(phys_ds[dim].size):
record = phys_ds[dim].isel({dim: i})
if 'time' in dim:
record = record.dt.strftime('%Y-%m-%d %H:%M').values.item()
if verbose:
print('processing {} for {} field.'.format(record, field_name))
if field_name == 'pw':
            long_name = 'Precipitable water'
Dewpt = phys_ds['Dewpt'].isel({dim: i})
P = phys_ds['P'].isel({dim: i})
try:
field, unit = wrap_xr_metpy_pw(Dewpt, P, bottom=bottom, top=top)
except ValueError:
field, unit = wrap_xr_metpy_pw(Dewpt, P, bottom=None, top=None)
elif field_name == 'tm':
long_name = 'Water vapor mean air temperature'
P = phys_ds['P'].isel({dim: i})
T = phys_ds['T'].isel({dim: i})
RH = phys_ds['RH'].isel({dim: i})
            if 'MR' not in phys_ds:
                MR = wrap_xr_metpy_mixing_ratio(P, T, RH, verbose=False)
            else:
                MR = phys_ds['MR'].isel({dim: i})
            if 'VP' not in phys_ds:
                VP = wrap_xr_metpy_vapor_pressure(P, MR)
            else:
                VP = phys_ds['VP'].isel({dim: i})
            if 'Rho' not in phys_ds:
                Rho = wrap_xr_metpy_density(P, T, MR, verbose=False)
            else:
                Rho = phys_ds['Rho'].isel({dim: i})
field, unit = calculate_tm_via_pressure_sum(VP, T, Rho, P,
bottom=bottom,
top=top)
elif field_name == 'ts':
long_name = 'Surface temperature'
if 'Height' in phys_ds['T'].dims:
dropped = phys_ds['T'].isel({dim: i}).dropna('Height')
elif 'time' in phys_ds['T'].dims:
dropped = phys_ds['T'].isel({dim: i}).dropna('time')
field = dropped[0].values.item() + 273.15
unit = 'K'
field_list.append(field)
da = xr.DataArray(field_list, dims=[dim])
da[dim] = phys_ds[dim]
da.attrs['units'] = unit
da.attrs['long_name'] = long_name
if top is not None:
da.attrs['top'] = top
if bottom is not None:
        da.attrs['bottom'] = bottom
da = keep_iqr(da, dim=dim, k=1.5)
if verbose:
print('Done!')
return da
def process_radiosonde_data(path=sound_path, savepath=sound_path,
data_type='phys', station='bet_dagan', verbose=False):
import xarray as xr
from aux_gps import path_glob
file = path_glob(path, '{}_{}_sounding_*.nc'.format(data_type, station))
phys_ds = xr.load_dataset(file[0])
ds = xr.Dataset()
ds['PW'] = process_new_field_from_radiosonde_data(phys_ds, dim='sound_time',
field_name='pw',
bottom=None,
top=None, verbose=verbose)
ds['Tm'] = process_new_field_from_radiosonde_data(phys_ds, dim='sound_time',
field_name='tm',
bottom=None,
top=None, verbose=verbose)
ds['Ts'] = process_new_field_from_radiosonde_data(phys_ds, dim='sound_time',
field_name='ts',
bottom=None,
top=None, verbose=verbose)
if data_type == 'phys':
ds['cloud_code'] = phys_ds['cloud_code']
ds['sonde_type'] = phys_ds['sonde_type']
ds['min_time'] = phys_ds['min_time']
ds['max_time'] = phys_ds['max_time']
yr_min = ds['sound_time'].min().dt.year.item()
yr_max = ds['sound_time'].max().dt.year.item()
filename = '{}_{}_PW_Tm_Ts_{}-{}.nc'.format(station, data_type, yr_min, yr_max)
print('saving {} to {}'.format(filename, savepath))
ds.to_netcdf(savepath / filename, 'w')
print('Done!')
return ds
def calculate_tm_via_trapz_height(VP, T, H):
from scipy.integrate import cumtrapz
import numpy as np
# change T units to K:
T_copy = T.copy(deep=True) + 273.15
num = cumtrapz(VP / T_copy, H, initial=np.nan)
denom = cumtrapz(VP / T_copy**2, H, initial=np.nan)
tm = num / denom
return tm
def calculate_tm_via_pressure_sum(VP, T, Rho, P, bottom=None, top=None,
cumulative=False, verbose=False):
import pandas as pd
import numpy as np
def tm_sum(VP, T, Rho, P, bottom=None, top=None):
# slice for top and bottom:
if bottom is not None:
P = P.where(P <= bottom, drop=True)
T = T.where(P <= bottom, drop=True)
Rho = Rho.where(P <= bottom, drop=True)
VP = VP.where(P <= bottom, drop=True)
if top is not None:
P = P.where(P >= top, drop=True)
T = T.where(P >= top, drop=True)
Rho = Rho.where(P >= top, drop=True)
VP = VP.where(P >= top, drop=True)
# convert to Kelvin:
T_values = T.values + 273.15
# other units don't matter since it is weighted temperature:
VP_values = VP.values
P_values = P.values
Rho_values = Rho.values
# now the pressure sum method:
p = pd.Series(P_values)
dp = p.diff(-1).abs()
num = pd.Series(VP_values / (T_values * Rho_values))
num_sum = num.shift(-1) + num
numerator = (num_sum * dp / 2).sum()
denom = pd.Series(VP_values / (T_values**2 * Rho_values))
denom_sum = denom.shift(-1) + denom
denominator = (denom_sum * dp / 2).sum()
tm = numerator / denominator
return tm
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming T units are degC...')
# check that VP and P have the same units:
assert P.attrs['units'] == VP.attrs['units']
P_values = P.values
if cumulative:
tm_list = []
# first value is nan:
tm_list.append(np.nan)
for pre_val in P_values[1:]:
if np.isnan(pre_val):
tm_list.append(np.nan)
continue
tm = tm_sum(VP, T, Rho, P, bottom=None, top=pre_val)
tm_list.append(tm)
tm = np.array(tm_list)
return tm, 'K'
else:
tm = tm_sum(VP, T, Rho, P, bottom=bottom, top=top)
return tm, 'K'
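# A minimal numeric sketch (illustrative values, not taken from the data) of
# the weighted mean temperature definition that the pressure sum above
# discretizes:
#
#   Tm = integral(e / T dz) / integral(e / T**2 dz)
#
# with e the water vapor partial pressure and T in Kelvin; via the hydrostatic
# relation dz = -dp / (rho * g) the height integrals become the trapezoid sums
# over VP / (T * Rho) and VP / (T**2 * Rho) used in tm_sum().
def _example_tm_trapezoid():
    import numpy as np
    p = np.array([1000.0, 900.0, 800.0])   # pressure levels [hPa]
    e = np.array([20.0, 12.0, 7.0])        # vapor pressure [hPa]
    t = np.array([300.0, 295.0, 290.0])    # temperature [K]
    rho = np.array([1.15, 1.05, 0.95])     # air density [kg/m^3]
    num = np.trapz(e / (t * rho), p)
    denom = np.trapz(e / (t**2 * rho), p)
    return num / denom                     # Tm in K, within the 290-300 K range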
def wrap_xr_metpy_pw(dewpt, pressure, bottom=None, top=None, verbose=False,
cumulative=False):
from metpy.calc import precipitable_water
from metpy.units import units
import numpy as np
try:
T_unit = dewpt.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming dewpoint units are degC...')
dew_values = dewpt.values * units(T_unit)
try:
P_unit = pressure.attrs['units']
assert P_unit == 'hPa'
except KeyError:
P_unit = 'hPa'
if verbose:
print('assuming pressure units are hPa...')
if top is not None:
top_with_units = top * units(P_unit)
else:
top_with_units = None
if bottom is not None:
bottom_with_units = bottom * units(P_unit)
else:
bottom_with_units = None
pressure_values = pressure.values * units(P_unit)
if cumulative:
pw_list = []
# first value is nan:
pw_list.append(np.nan)
for pre_val in pressure_values[1:]:
if np.isnan(pre_val):
pw_list.append(np.nan)
continue
pw = precipitable_water(pressure_values, dew_values, bottom=None,
top=pre_val)
pw_units = pw.units.format_babel('~P')
pw_list.append(pw.magnitude)
pw = np.array(pw_list)
return pw, pw_units
else:
pw = precipitable_water(pressure_values, dew_values,
bottom=bottom_with_units, top=top_with_units)
pw_units = pw.units.format_babel('~P')
return pw.magnitude, pw_units
def calculate_absolute_humidity_from_partial_pressure(VP, T, verbose=False):
Rs_v = 461.52 # Specific gas const for water vapour, J kg^{-1} K^{-1}
try:
VP_unit = VP.attrs['units']
assert VP_unit == 'hPa'
except KeyError:
VP_unit = 'hPa'
if verbose:
print('assuming vapor units are hPa...')
# convert to Pa:
VP_values = VP.values * 100.0
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degree celsius...')
# convert to Kelvin:
T_values = T.values + 273.15
Rho_wv = VP_values/(Rs_v * T_values)
    # resulting units are kg/m^3, convert to g/m^3:
    Rho_wv *= 1000.0
da = VP.copy(data=Rho_wv)
da.attrs['units'] = 'g/m^3'
da.attrs['long_name'] = 'Absolute humidity'
return da
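# Quick numeric check of the ideal-gas relation used above (illustrative
# values): rho_wv = e / (R_v * T) with e in Pa and T in K.
def _example_absolute_humidity():
    Rs_v = 461.52          # J kg^-1 K^-1
    e_pa = 20.0 * 100.0    # 20 hPa of water vapor partial pressure, in Pa
    T_k = 25.0 + 273.15    # 25 degC in K
    rho_gm3 = 1000.0 * e_pa / (Rs_v * T_k)   # ~14.5 g/m^3
    return rho_gm3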
def wrap_xr_metpy_specific_humidity(MR, verbose=False):
from metpy.calc import specific_humidity_from_mixing_ratio
from metpy.units import units
try:
MR_unit = MR.attrs['units']
assert MR_unit == 'g/kg'
except KeyError:
MR_unit = 'g/kg'
if verbose:
            print('assuming mixing ratio units are g/kg...')
MR_values = MR.values * units(MR_unit)
SH = specific_humidity_from_mixing_ratio(MR_values)
da = MR.copy(data=SH.magnitude)
da.attrs['units'] = MR_unit
da.attrs['long_name'] = 'Specific humidity'
return da
def calculate_atmospheric_refractivity(P, T, RH, verbose=False):
MR = wrap_xr_metpy_mixing_ratio(P, T, RH)
VP = wrap_xr_metpy_vapor_pressure(P, MR)
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degree celsius...')
# convert to Kelvin:
T_k = T + 273.15
N = 77.6 * P / T_k + 3.73e5 * VP / T_k**2
N.attrs['units'] = 'dimensionless'
N.attrs['long_name'] = 'Index of Refractivity'
return N
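# Worked example of the refractivity formula above (illustrative surface
# values): N = 77.6 * P / T + 3.73e5 * e / T**2 with P, e in hPa and T in K.
def _example_refractivity():
    P, T_k, e = 1000.0, 300.0, 20.0
    N = 77.6 * P / T_k + 3.73e5 * e / T_k**2
    return N   # ~341.6 N-units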
def convert_wind_speed_direction_to_zonal_meridional(WS, WD, verbose=False):
# make sure it is right!
import numpy as np
# drop nans from WS and WD:
dim = list(set(WS.dims))[0]
assert dim == list(set(WD.dims))[0]
DS = WS.to_dataset(name='WS')
DS['WD'] = WD
# DS = DS.dropna(dim)
WS = DS['WS']
WD = DS['WD']
assert WS.size == WD.size
WD = 270 - WD
try:
WS_unit = WS.attrs['units']
if WS_unit != 'm/s':
if WS_unit == 'knots':
# 1knots= 0.51444445m/s
if verbose:
print('wind speed in knots, converting to m/s')
WS = WS * 0.51444445
WS.attrs.update(units='m/s')
except KeyError:
WS_unit = 'm/s'
if verbose:
print('assuming wind speed units are m/s...')
U = WS * np.cos(np.deg2rad(WD))
V = WS * np.sin(np.deg2rad(WD))
U.attrs['long_name'] = 'zonal_velocity'
U.attrs['units'] = 'm/s'
V.attrs['long_name'] = 'meridional_velocity'
V.attrs['units'] = 'm/s'
U.name = 'u'
V.name = 'v'
return U, V
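# Sanity check of the wind convention implemented above (a sketch, not called
# anywhere): with the meteorological convention, a wind *from* the west
# (WD = 270 deg) has a positive zonal (eastward) component, i.e. u = +WS, v = 0.
def _example_wind_components():
    import numpy as np
    WS, WD = 10.0, 270.0           # 10 m/s blowing from the west
    ang = np.deg2rad(270.0 - WD)   # same rotation as in the function above
    u = WS * np.cos(ang)           # -> +10 m/s (eastward)
    v = WS * np.sin(ang)           # -> 0 m/s
    return u, v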
#def compare_WW2014_to_Rib_all_seasons(path=sound_path, times=None,
# plot_type='hist', bins=25):
# import matplotlib.pyplot as plt
# import seaborn as sns
# if plot_type == 'hist' or plot_type == 'scatter':
# fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
# figsize=(10, 8))
# seasons = ['DJF', 'MAM', 'JJA', 'SON']
# cmap = sns.color_palette("colorblind", 2)
# for i, ax in enumerate(axs.flatten()):
# ax = compare_WW2014_to_Rib_single_subplot(sound_path=path,
# season=seasons[i],
# times=times, ax=ax,
# colors=[cmap[0],
# cmap[1]],
# plot_type=plot_type,
# bins=bins)
# fig_hist.tight_layout()
# return
#def compare_WW2014_to_Rib_single_subplot(sound_path=sound_path, season=None,
# times=None, bins=None,
# ax=None, colors=None,
# plot_type='hist'):
# from aux_gps import path_glob
# import xarray as xr
# from PW_from_gps_figures import plot_two_histograms_comparison
# ww_file = path_glob(sound_path, 'MLH_WW2014_*.nc')[-1]
# ww = xr.load_dataarray(ww_file)
# rib_file = path_glob(sound_path, 'MLH_Rib_*.nc')[-1]
# rib = xr.load_dataarray(rib_file)
# ds = ww.to_dataset(name='MLH_WW')
# ds['MLH_Rib'] = rib
# if season is not None:
# ds = ds.sel(sound_time=ds['sound_time.season'] == season)
# print('selected {} season'.format(season))
# labels = ['MLH-Rib for {}'.format(season), 'MLH-WW for {}'.format(season)]
# else:
# labels = ['MLH-Rib Annual', 'MLH-WW Annual']
# if times is not None:
# ds = ds.sel(sound_time=slice(*times))
# print('selected {}-{} period'.format(*times))
# title = 'Bet-Dagan radiosonde {}-{} period'.format(*times)
# else:
# times = [ds.sound_time.min().dt.year.item(),
# ds.sound_time.max().dt.year.item()]
# title = 'Bet-Dagan radiosonde {}-{} period'.format(*times)
# if plot_type == 'hist':
# ax = plot_two_histograms_comparison(ds['MLH_Rib'], ds['MLH_WW'],
# ax=ax, labels=labels,
# colors=colors, bins=bins)
# ax.legend()
# ax.set_ylabel('Frequency')
# ax.set_xlabel('MLH [m]')
# ax.set_title(title)
# elif plot_type == 'scatter':
# if ax is None:
# fig, ax = plt.subplots()
# ax.scatter(ds['MLH_Rib'].values, ds['MLH_WW'].values)
# ax.set_xlabel(labels[0].split(' ')[0] + ' [m]')
# ax.set_ylabel(labels[1].split(' ')[0] + ' [m]')
# season_label = labels[0].split(' ')[-1]
# ax.plot(ds['MLH_Rib'], ds['MLH_Rib'], c='r')
# ax.legend(['y = x', season_label], loc='upper right')
# ax.set_title(title)
# return ax
#def calculate_Wang_and_Wang_2014_MLH_all_profiles(sound_path=sound_path,
# data_type='phys',
# hour=12, plot=True,
# savepath=None):
# import xarray as xr
# from aux_gps import smooth_xr
# import matplotlib.pyplot as plt
# import seaborn as sns
# from aux_gps import save_ncfile
# from PW_from_gps_figures import plot_seasonal_histogram
# if data_type == 'phys':
# bd = xr.load_dataset(sound_path / 'bet_dagan_phys_sounding_2007-2019.nc')
# elif data_type == 'edt':
# bd = xr.load_dataset(sound_path / 'bet_dagan_edt_sounding_2016-2019.nc')
# # N = calculate_atmospheric_refractivity(bd['P'], bd['T'], bd['VP'])
# # assemble all WW vars:
# WW = bd['N'].to_dataset(name='N')
# WW['RH'] = bd['RH']
# WW['PT'] = bd['PT']
# WW['MR'] = bd['MR']
# # slice hour:
# WW = WW.sel(sound_time=WW['sound_time.hour'] == hour)
# # produce gradients:
# WW_grad = WW.differentiate('Height', edge_order=2)
# # smooth them with 1-2-1 smoother:
# WW_grad_smoothed = smooth_xr(WW_grad, 'Height')
## return WW_grad_smoothed
# mlhs = []
# for dt in WW_grad_smoothed.sound_time:
# df = WW_grad_smoothed.sel(sound_time=dt).reset_coords(drop=True).to_dataframe()
# mlhs.append(calculate_Wang_and_Wang_2014_MLH_single_profile(df, plot=False))
# mlh = xr.DataArray(mlhs, dims=['sound_time'])
# mlh['sound_time'] = WW_grad_smoothed['sound_time']
# mlh.name = 'MLH'
# mlh.attrs['long_name'] = 'Mixing layer height'
# mlh.attrs['units'] = 'm'
# mlh.attrs['method'] = 'W&W2014 using PT, N, MR and RH'
# if savepath is not None:
# filename = 'MLH_WW2014_{}_{}.nc'.format(data_type, hour)
# save_ncfile(mlh, sound_path, filename)
# if plot:
# cmap = sns.color_palette("colorblind", 5)
# fig, ax = plt.subplots(3, 1, sharex=True, figsize=(12, 9))
# df_mean = mlh.groupby('sound_time.month').mean().to_dataframe('mean_MLH')
# df_mean.plot(color=cmap, ax=ax[0])
# ax[0].grid()
# ax[0].set_ylabel('Mean MLH [m]')
# ax[0].set_title(
# 'Annual mixing layer height from Bet-Dagan radiosonde profiles ({}Z) using W&W2014 method'.format(hour))
# df_std = mlh.groupby('sound_time.month').std().to_dataframe('std_MLH')
# df_std.plot(color=cmap, ax=ax[1])
# ax[1].grid()
# ax[1].set_ylabel('Std MLH [m]')
# df_count = mlh.groupby('sound_time.month').count().to_dataframe('count_MLH')
# df_count.plot(color=cmap, ax=ax[2])
# ax[2].grid()
# ax[2].set_ylabel('Count MLH [#]')
# fig.tight_layout()
# plot_seasonal_histogram(mlh, dim='sound_time', xlim=(-100, 3000),
# xlabel='MLH [m]',
# suptitle='MLH histogram using W&W 2014 method')
# return mlh
#def calculate_Wang_and_Wang_2014_MLH_single_profile(df, alt_cutoff=3000,
# plot=True):
# import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
# # first , cutoff:
# df = df.loc[0: alt_cutoff]
# if plot:
## df.plot(subplots=True)
# fig, ax = plt.subplots(1, 4, figsize=(20, 16))
# df.loc[0: 1200, 'PT'].reset_index().plot.line(y='Height', x='PT', ax=ax[0], legend=False)
# df.loc[0: 1200, 'RH'].reset_index().plot.line(y='Height', x='RH', ax=ax[1], legend=False)
# df.loc[0: 1200, 'MR'].reset_index().plot.line(y='Height', x='MR', ax=ax[2], legend=False)
# df.loc[0: 1200, 'N'].reset_index().plot.line(y='Height', x='N', ax=ax[3], legend=False)
# [x.grid() for x in ax]
# ind = np.arange(1, 11)
# pt10 = df['PT'].nlargest(n=10).index.values
# n10 = df['N'].nsmallest(n=10).index.values
# rh10 = df['RH'].nsmallest(n=10).index.values
# mr10 = df['MR'].nsmallest(n=10).index.values
# ten = pd.DataFrame([pt10, n10, rh10, mr10]).T
# ten.columns = ['PT', 'N', 'RH', 'MR']
# ten.index = ind
# for i, vc_df in ten.iterrows():
# mlh_0 = vc_df.value_counts()[vc_df.value_counts() > 2]
# if mlh_0.empty:
# continue
# else:
# mlh = mlh_0.index.item()
# return mlh
# print('MLH Not found using W&W!')
# return np.nan
def plot_pblh_radiosonde(path=sound_path, reduce='median', fontsize=20):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
pblh = xr.load_dataset(
sound_path /
'PBLH_classification_bet_dagan_2s_sounding_2014-2019.nc')
if reduce == 'median':
pblh_r = pblh.groupby('sound_time.month').median()
elif reduce == 'mean':
pblh_r = pblh.groupby('sound_time.month').mean()
pblh_c = pblh.groupby('sound_time.month').count()
count_total = pblh_c.sum()
df = pblh_r.to_dataframe()
df[['SBLH_c', 'RBLH_c', 'CBLH_c']] = pblh_c.to_dataframe()
fig, axes = plt.subplots(3, 1, sharex=False, sharey=False, figsize=(10, 10))
line_color = 'black'
bar_color = 'tab:orange'
df['CBLH'].plot(ax=axes[0], linewidth=2, color=line_color, marker='o', label='CBL', legend=True)
tw_0 = axes[0].twinx()
tw_0.bar(x=df.index.values, height=df['CBLH_c'].values, color=bar_color, alpha=0.4)
df['RBLH'].plot(ax=axes[1], linewidth=2, color=line_color, marker='o', label='RBL', legend=True)
tw_1 = axes[1].twinx()
tw_1.bar(x=df.index.values, height=df['RBLH_c'].values, color=bar_color, alpha=0.4)
df['SBLH'].plot(ax=axes[2], linewidth=2, color=line_color, marker='o', label='SBL', legend=True)
tw_2 = axes[2].twinx()
tw_2.bar(x=df.index.values, height=df['SBLH_c'].values, color=bar_color, alpha=0.4)
axes[0].set_ylabel('CBL [m]', fontsize=fontsize)
axes[1].set_ylabel('RBL [m]', fontsize=fontsize)
axes[2].set_ylabel('SBL [m]', fontsize=fontsize)
tw_0.set_ylabel('Launch ({} total)'.format(count_total['CBLH'].values), fontsize=fontsize)
tw_1.set_ylabel('Launch ({} total)'.format(count_total['RBLH'].values), fontsize=fontsize)
tw_2.set_ylabel('Launch ({} total)'.format(count_total['SBLH'].values), fontsize=fontsize)
[ax.set_xticks(np.arange(1,13,1)) for ax in axes]
[ax.grid() for ax in axes]
[ax.tick_params(labelsize=fontsize) for ax in axes]
[ax.tick_params(labelsize=fontsize) for ax in [tw_0, tw_1, tw_2]]
fig.suptitle(
'PBL {} Height from Bet-Dagan radiosonde (2014-2019)'.format(reduce),
fontsize=fontsize)
fig.tight_layout()
return fig
def align_rbl_times_cloud_h1_pwv(rbl_cat, path=work_yuval,
ceil_path=ceil_path, pw_station='tela',
plot_diurnal=True, fontsize=16):
from ceilometers import read_BD_ceilometer_yoav_all_years
import xarray as xr
import matplotlib.pyplot as plt
from aux_gps import anomalize_xr
import numpy as np
# first load cloud_H1 and pwv:
cld = read_BD_ceilometer_yoav_all_years(path=ceil_path)['cloud_H1']
cld[cld==0]=np.nan
ds = cld.to_dataset(name='cloud_H1')
pwv = xr.open_dataset(
path /
'GNSS_PW_thresh_50.nc')[pw_station]
pwv.load()
pw_name = 'pwv_{}'.format(pw_station)
bins_name = '{}'.format(rbl_cat.name)
ds[pw_name] = pwv.sel(time=pwv['time.season']=='JJA')
daily_pwv_total = ds[pw_name].groupby('time.hour').count().sum()
print(daily_pwv_total)
daily_pwv = anomalize_xr(ds[pw_name]).groupby('time.hour').mean()
# now load rbl_cat with attrs:
ds[bins_name] = rbl_cat
ds = ds.dropna('time')
# change dtype of bins to int:
ds[bins_name] = ds[bins_name].astype(int)
# produce pwv anomalies regarding the bins:
pwv_anoms = ds[pw_name].groupby(ds[bins_name]) - ds[pw_name].groupby(ds[bins_name]).mean('time')
counts = ds.groupby('time.hour').count()['cloud_H1']
ds['pwv_{}_anoms'.format(pw_station)] = pwv_anoms.reset_coords(drop=True)
if plot_diurnal:
fig, axes = plt.subplots(figsize=(15, 8))
df_hour = ds['pwv_tela_anoms'].groupby('time.hour').mean().to_dataframe()
df_hour['cloud_H1'] = ds['cloud_H1'].groupby('time.hour').mean()
df_hour['cloud_H1_counts'] = counts
df_hour['pwv_tela_daily_anoms'] = daily_pwv
df_hour['pwv_tela_anoms'].plot(marker='s', ax=axes, linewidth=2)
df_hour['pwv_tela_daily_anoms'].plot(ax=axes, marker='s', color='r', linewidth=2)
# ax2 = df_hour['cloud_H1'].plot(ax=axes[0], secondary_y=True, marker='o')
ax2 = df_hour['cloud_H1_counts'].plot(ax=axes, secondary_y=True, marker='o', linewidth=2)
axes.set_ylabel('PWV TELA anomalies [mm]', fontsize=fontsize)
axes.set_xlabel('Hour of day [UTC]', fontsize=fontsize)
ax2.set_ylabel('Cloud H1 data points', fontsize=fontsize)
axes.set_xticks(np.arange(0, 24, 1))
axes.xaxis.grid()
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
axes.legend(handles,labels, fontsize=fontsize)
# counts.to_dataframe(name='Count').plot(kind='bar', color='tab:blue', alpha=0.5, ax=axes[1], rot=0)
# axes[1].bar(x=np.arange(0, 24, 1), height=counts.values, color='tab:blue', alpha=0.5)
# axes[1].set_xticks(np.arange(0, 24, 1))
axes.tick_params(labelsize=fontsize)
ax2.tick_params(labelsize=fontsize)
fig.tight_layout()
fig.suptitle('PWV TELA anomalies and Cloud H1 counts for JJA', fontsize=fontsize)
fig.subplots_adjust(top=0.951,
bottom=0.095,
left=0.071,
right=0.936,
hspace=0.2,
wspace=0.2)
return ds
def categorize_da_ts(da_ts, season=None, add_hours_to_dt=None, resample=True,
bins=[0, 200, 400, 800, 1000, 1200, 1400, 1600, 1800, 2000, 2500]):
# import xarray as xr
import numpy as np
import pandas as pd
time_dim = list(set(da_ts.dims))[0]
if season is not None:
da_ts = da_ts.sel(
{time_dim: da_ts['{}.season'.format(time_dim)] == season})
print('{} season selected'.format(season))
# bins = rbl.quantile(
# [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0])
if da_ts.name is None:
name = 'MLH'
else:
name = da_ts.name
# rename sound_time to time:
da_ts = da_ts.rename({time_dim: 'time'})
df = da_ts.to_dataframe(name=name)
labels = np.arange(0, len(bins) - 1)
df['{}_bins'.format(name)] = pd.cut(
df['{}'.format(name)], bins=bins, labels=labels, retbins=False)
df_bins = df['{}_bins'.format(name)]
if add_hours_to_dt is not None:
print('adding {} hours to datetimes.'.format(add_hours_to_dt))
df_bins.index += pd.Timedelta(add_hours_to_dt, unit='H')
if resample:
re = []
for row in df_bins.dropna().to_frame().iterrows():
bin1 = row[1].values
new_time = pd.date_range(row[0], periods=288, freq='5T')
new_bins = [bin1 for x in new_time]
re.append(pd.DataFrame(new_bins, index=new_time, columns=['{}_bins'.format(name)]))
# df_bins = df_bins.resample('5T').ffill(limit=576).dropna()
df_bins = pd.concat(re, axis=0)
print('resampling to 5 mins using ffill.')
# result = xr.apply_ufunc(np.digitize, rbl, kwargs={'bins': bins})
# df = result.to_dataframe('bins')
# df['rbl'] = rbl.to_dataframe(name='rbl')
# means = df['rbl'].groupby(df['bins']).mean()
# or just:
# rbl_bins = rbl.to_dataset(name='rbl').groupby_bins(group='rbl',bins=bins, labels=np.arange(1, len(bins))).groups
# grp = df.groupby('{}_bins'.format(name)).groups
print('categorizing to bins: {}'.format(','.join([str(x) for x in bins])))
df_bins.index.name = 'time'
da = df_bins.to_xarray().to_array(name='{}_bins'.format(name)).squeeze(drop=True)
# get the bins borders and insert them as attrs to da:
dumm = pd.cut(df['{}'.format(name)], bins=bins, labels=None, retbins=False)
left = [x.left for x in dumm.dtype.categories]
right = [x.right for x in dumm.dtype.categories]
for i, label in enumerate(labels):
da.attrs[str(label)] = [float(left[i]), float(right[i])]
da.attrs['units'] = da_ts.attrs['units']
return da
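# A minimal illustration (toy numbers, an assumption) of the pd.cut binning
# used above: heights are mapped to integer bin labels 0..len(bins)-2, e.g.
#
#   import pandas as pd
#   pd.cut(pd.Series([150, 450, 2100]), bins=[0, 200, 400, 800, 2500],
#          labels=[0, 1, 2, 3])
#   # -> labels 0, 2 and 3 respectively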
def prepare_radiosonde_and_solve_MLH(ds, method='T', max_height=300):
import xarray as xr
import pandas as pd
ds = ds.drop_sel(time=pd.to_timedelta(0, unit='s'))
# nullify the first Height:
ds['Height'] -= ds['Height'].isel(time=0)
pbls = []
stimes = []
for i in range(ds['sound_time'].size):
if method == 'T':
mlh = find_surface_inversion_height(
ds.isel(
sound_time=i).reset_coords(
drop=True), max_height=max_height)
elif method == 'rig':
mlh = find_MLH_from_2s_richardson(
ds.isel(
sound_time=i).reset_coords(
drop=True), method='grad')
elif method == 'WW':
mlh = find_MLH_from_2s_WW2014(
ds.isel(
sound_time=i), alt_cutoff=max_height)
elif method == 'rib':
mlh = find_MLH_from_2s_richardson(
ds.isel(
sound_time=i), method='bulk')
if mlh is not None:
pbls.append(mlh)
stimes.append(ds.isel(sound_time=i)['sound_time'])
sound_time = xr.concat(stimes, 'sound_time')
pbl = xr.DataArray([x.values for x in pbls], dims=['sound_time'])
pbl['sound_time'] = sound_time
pbl = pbl.sortby('sound_time')
pbl.attrs['method'] = method
if max_height is not None:
pbl.attrs['max_height'] = max_height
return pbl
def classify_bet_dagan_pblh(path=sound_path, savepath=None):
import xarray as xr
from aux_gps import save_ncfile
ds = xr.load_dataset(path / 'bet_dagan_2s_sounding_2014-2019.nc')
sbl = classify_SBL(ds, method='T', sbl_max_height=300, filter_cbl=True)
rbl = classify_RBL(ds, sbl, method='WW', max_height=3500)
cbl = classify_CBL(ds, method='WW', max_height=3500)
dss = xr.merge([sbl, rbl, cbl])
if savepath is not None:
filename = 'PBLH_classification_bet_dagan_2s_sounding_2014-2019.nc'
save_ncfile(dss, savepath, filename)
return dss
def classify_SBL(ds, method='T', sbl_max_height=300, filter_cbl=True):
"""Run find_surface_inversion using T gradient and filter all 12Z records
use method=T for temp inversion, rig for gradient richardson"""
# TODO: fix gradient richardson method
print('classifying SBL...')
sbl = prepare_radiosonde_and_solve_MLH(
ds, method=method, max_height=sbl_max_height)
if filter_cbl:
print('filtered {} 12Z records.'.format(
sbl[sbl['sound_time.hour'] == 12].count().item()))
sbl = sbl[sbl['sound_time.hour'] == 00]
sbl.name = 'SBLH'
sbl.attrs['long_name'] = 'Stable Boundary Layer Height'
sbl.attrs['units'] = 'm'
sbl.attrs['method'] = method
return sbl
def classify_RBL(ds, sbl, method='WW', max_height=3500):
import pandas as pd
print('classifying RBL...')
# filter SBLs, first assemble all 00Z:
Z00_st = ds['T'].transpose('sound_time', 'time')[
ds['sound_time.hour'] == 00]['sound_time']
# ds = ds.sel(sound_time=Z00_st)
# take out the SBL events:
sbl_st = sbl['sound_time']
st = pd.to_datetime(
list(set(Z00_st.values).difference(set(sbl_st.values))))
ds = ds.sel(sound_time=st)
rbl = prepare_radiosonde_and_solve_MLH(
ds, method=method, max_height=max_height)
print(
'found {} RBLs from total of {}'.format(
rbl['sound_time'].size,
st.size))
rbl.name = 'RBLH'
rbl.attrs['long_name'] = 'Residual Boundary Layer Height'
rbl.attrs['units'] = 'm'
rbl.attrs['rbl_candidates'] = st.size
rbl.attrs['rbl_success'] = rbl['sound_time'].size
rbl.attrs['method'] = method
return rbl
def classify_CBL(ds, method='WW', max_height=3500):
# filter only daytime:
print('classifying CBL...')
Z12_st = ds['T'].transpose('sound_time', 'time')[
ds['sound_time.hour'] == 12]['sound_time']
ds = ds.sel(sound_time=Z12_st)
cbl = prepare_radiosonde_and_solve_MLH(
ds, method=method, max_height=max_height)
print(
'found {} CBLs from total of {}'.format(
cbl['sound_time'].size,
ds['sound_time'].size))
cbl.name = 'CBLH'
cbl.attrs['long_name'] = 'Convective Boundary Layer Height'
cbl.attrs['units'] = 'm'
cbl.attrs['cbl_candidates'] = ds['sound_time'].size
cbl.attrs['cbl_success'] = cbl['sound_time'].size
cbl.attrs['method'] = method
return cbl
def find_surface_inversion_height(ds, min_height=None, max_height=300, max_time=None):
"""calculate surface inversion height in meters, surface is defined as
max_time in seconds after radiosonde launch or max_height in meters,
if we find the surface invesion layer height is it a candidate for SBL
(use Rig), if not, and it is at night use Rib, W&W to find RBL"""
import numpy as np
from aux_gps import smooth_xr
# from aux_gps import smooth_xr
# ds = smooth_xr(ds[['T', 'Height']], dim='time')
dsize = len(ds.dims)
if dsize != 1:
        raise ValueError('ds dimensions should be 1!')
new_ds = ds[['T', 'Height']]
if max_height is None and max_time is None:
        raise ValueError('Pls pick either max_time or max_height...')
if max_height is None and max_time is not None:
new_ds = new_ds.isel(time=slice(0, max_time))
# T_diff = (-ds['T'].diff('time').isel(time=slice(0, max_time)))
elif max_height is not None and max_time is None:
new_ds = new_ds.where(ds['Height'] <= max_height, drop=True)
# T_diff = (-ds['T'].diff('time').where(ds['Height']
# <= max_height, drop=True))
if min_height is not None:
new_ds = new_ds.where(ds['Height'] >= min_height, drop=True)
T = new_ds['T']
H = new_ds['Height']
T['time'] = H.values
T = T.rename(time='Height')
dT = smooth_xr(T.differentiate('Height'), 'Height')
dT = dT.dropna('Height')
indLeft = np.searchsorted(-dT, 0, side='left')
indRight = np.searchsorted(-dT, 0, side='right')
if indLeft == indRight:
ind = indLeft
mlh = H[ind -1: ind + 1].mean()
else:
ind = indLeft
mlh = H[ind]
    # condition for SBL, i.e., the temperature increases with height until it reverses
positive_dT = (dT[0] - dT[ind - 1]) > 0
last_ind = dT.size - 1
if (ind < last_ind) and (ind > 0) and positive_dT:
return mlh
else:
return None
# T_diff = T_diff.where(T_diff < 0)
# if T_diff['time'].size != 0:
# if dsize == 1:
# inversion_time = T_diff.idxmin('time')
# inversion_height = ds['Height'].sel(time=inversion_time)
# elif dsize == 2:
# inversion_time = T_diff.idxmin('time').dropna('sound_time')
# inversion_height = ds['Height'].sel(time=inversion_time, sound_time=inversion_time['sound_time'])
# else:
# inversion_height = None
# return inversion_height
def calculate_temperature_lapse_rate_from_2s_radiosonde(ds, radio_time=2,
Height=None):
"""calculate the \Gamma = -dT/dz (lapse rate), with either radio_time (2 s after launch)
or Height at certain level"""
import numpy as np
# check for dims:
dsize = len(ds.dims)
if dsize > 2 or dsize < 1:
        raise ValueError('ds dimensions should be 1 or 2...')
T = ds['T']
H = ds['Height']
T0 = T.isel(time=0)
H0 = H.isel(time=0)
if radio_time is None and Height is None:
        raise ValueError('Pls pick either radio_time or Height...')
if radio_time is not None and Height is None:
radio_time = np.timedelta64(radio_time, 's')
T1 = T.sel(time=radio_time, method='nearest')
H1 = H.sel(time=radio_time, method='nearest')
seconds_after = T['time'].sel(time=radio_time, method='nearest').dt.seconds.item()
if dsize == 1:
height = H.sel(time=radio_time, method='nearest').item()
dz = height - H0.item()
elif dsize == 2:
height = H.sel(time=radio_time, method='nearest').mean().item()
dz = (H1 - H0).mean().item()
method = 'time'
elif radio_time is None and Height is not None:
if dsize == 1:
t1 = (np.abs(H-Height)).idxmin().item()
elif dsize == 2:
t1 = (np.abs(H-Height)).idxmin('time')
H1 = H.sel(time=t1)
T1 = T.sel(time=t1)
if dsize == 1:
height = H1.item()
seconds_after = T['time'].sel(time=t1).dt.seconds.item()
dz = height - H0.item()
elif dsize == 2:
height = H1.mean().item()
dz = (H1 - H0).mean().item()
seconds_after = T['time'].sel(time=t1).dt.seconds.mean().item()
method = 'height'
gamma = -1* (T0 - T1) / ((H1 - H0) / 1000)
gamma.attrs['units'] = 'degC/km'
gamma.name = 'Gamma'
gamma.attrs['long_name'] = 'temperature lapse rate'
gamma.attrs['Height_taken'] = '{:.2f}'.format(height)
gamma.attrs['dz [m]'] = '{:.2f}'.format(dz)
gamma.attrs['seconds_after_launch'] = seconds_after
gamma.attrs['method'] = method
return gamma
def plot_2s_radiosonde_single_profile(ds, max_height=1000, rib_lims=None,
plot_type='WW'):
# drop the first time:
# ds1=ds1.drop_sel(time=pd.to_timedelta(0,unit='s'))
# nullify the first Height:
# ds1['Height'] -= ds1['Height'][0]
# fix 31-35 meters ambiguity
# fix PT in K and in degC = probably calculated in K and sub 273.15
import matplotlib.pyplot as plt
import pandas as pd
assert len(ds.dims) == 1 and 'time' in ds.dims
dt = pd.to_datetime(ds['sound_time'].values)
# ds['Height'] -= ds['Height'][0]
mlh_rib = find_MLH_from_2s_richardson(ds, method='bulk')
mlh_rig = find_MLH_from_2s_richardson(ds, method='grad')
mlh_ww = find_MLH_from_2s_WW2014(ds, alt_cutoff=3500)
mlh_t = find_surface_inversion_height(ds, max_time=None, max_height=300)
ds['rib'] = calculate_richardson_from_2s_radiosonde(ds, method='bulk')
ds['rig'] = calculate_richardson_from_2s_radiosonde(ds, method='grad')
ds['N'] = calculate_atmospheric_refractivity(ds['P'], ds['T'], ds['RH'])
T_k = ds['T'] + 273.15
T_k.attrs['units'] = 'K'
ds['PT'] = wrap_xr_metpy_potential_temperature(ds['P'], T_k) # in degC
ds = ds.assign_coords(time=ds['Height'].values)
ds = ds.drop('Height')
ds = ds.rename(time='Height')
df = ds[['T', 'rib', 'rig', 'PT', 'RH', 'MR', 'N']].to_dataframe()
if max_height is not None:
df = df[df.index <= max_height]
df['Height'] = df.index.values
df['MR'] *= 1000 # to g/kg
df['PT'] -= 273.15
if plot_type == 'WW':
fig, axes = plt.subplots(
1, 5, sharey=False, sharex=False, figsize=(
20, 15))
df.plot.line(
ax=axes[0],
x='rib',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
axes[0].axhline(y=mlh_rib,color='k', linestyle='-', linewidth=1.5)
df.plot.line(
ax=axes[1],
x='PT',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
df.plot.line(
ax=axes[2],
x='RH',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
df.plot.line(
ax=axes[3],
x='MR',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
df.plot.line(
ax=axes[4],
x='N',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
[ax.axhline(y=mlh_ww, color='k', linestyle='-', linewidth=1.5) for ax in axes[1:4]]
axes[0].set_xlabel('Ri$_b$')
axes[0].axvline(0.25, color='k', linestyle='--')
axes[1].set_xlabel('$\Theta$ [$\degree$C]')
axes[2].set_xlabel('RH [%]')
axes[3].set_xlabel('w [g/kg]')
axes[4].set_xlabel('N')
axes[0].set_ylabel('z [m]')
if rib_lims is not None:
axes[0].set_xlim(*rib_lims)
elif plot_type == 'T':
fig, axes = plt.subplots(
1, 3, sharey=False, sharex=False, figsize=(
20, 15))
df.plot.line(
ax=axes[0],
x='T',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
if mlh_t is not None:
axes[0].axhline(y=mlh_t,color='k', linestyle='-', linewidth=1.5)
df.plot.line(
ax=axes[1],
x='rig',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
axes[1].axhline(y=mlh_rig, color='k', linestyle='-', linewidth=1.5)
df.plot.line(
ax=axes[2],
x='rib',
y='Height',
marker='.',
legend=False,
grid=True,
color='g')
axes[1].set_ylim(0, max_height)
axes[2].axhline(y=mlh_rib, color='k', linestyle='-', linewidth=1.5)
axes[0].set_xlabel('T [$\degree$C]')
axes[1].set_xlabel('Ri$_g$')
axes[2].set_xlabel('Ri$_b$')
axes[1].axvline(0.25, color='k', linestyle='--')
axes[2].axvline(0.25, color='k', linestyle='--')
axes[0].set_ylabel('Z [m]')
if rib_lims is not None:
axes[2].set_xlim(*rib_lims)
fig.suptitle(dt)
return fig
def calculate_richardson_from_2s_radiosonde(ds, g=9.79474, method='bulk'):
import numpy as np
dsize = len(ds.dims)
if dsize == 2:
axis=1
else:
axis=0
T_k = ds['T'] + 273.15
T_k.attrs['units'] = 'K'
PT = wrap_xr_metpy_potential_temperature(ds['P'], T_k)
VPT = wrap_xr_metpy_virtual_potential_temperature(ds['P'], ds['T'], ds['MR'])
U = ds['u']
V = ds['v']
H = ds['Height']
# VPT = wrap_xr_metpy_virtual_potential_temperature(ds['P'], ds['T'], ds['MR']*1000)
# VPT = PT * (1 +ds['MR']*1000/0.622)/(1+ds['MR']*1000) - 273.15
# VPT_mean = VPT.cumsum('time') / (np.arange(H.size) + 1)
if method == 'bulk':
H0 = H.isel(time=0)
# H0 = 0
U0 = U.isel(time=0)
V0 = V.isel(time=0)
VPT_0 = VPT.isel(time=0)
U0 = 0
V0 = 0
U2 = (U - U0)**2.0
V2 = (V - V0)**2.0
Rib_values = g * (VPT - VPT_0) * (H - H0) / ((VPT_0) * (U2 + V2))
# Rib_values = g * (VPT - VPT_0) * (H - H0) / ((VPT_mean) * (U2 + V2))
Ri = VPT.copy(data=Rib_values)
Ri.name = 'Rib'
Ri.attrs.update(long_name='Bulk Richardson Number')
Ri.attrs.update(units='dimensionless')
elif method == 'grad':
# PT -= 273.15
BVF2 = wrap_xr_metpy_brunt_vaisala_f2(H, PT, verbose=False, axis=axis)
# U.assign_coords(time=H)
# V.assign_coords(time=H)
# U = U.rename(time='Height')
# V = V.rename(time='Height')
# dU = U.differentiate('Height').values
# dV = V.differentiate('Height').values
dU = (U.differentiate('time') / H.differentiate('time')).values
dV = (V.differentiate('time') / H.differentiate('time')).values
Ri = BVF2 / (dU**2 + dV**2)
Ri.name = 'Rig'
Ri.attrs.update(long_name='Gradient Richardson Number')
Ri.attrs.update(units='dimensionless')
return Ri
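# For reference (a sketch of the math matching the code above), the bulk form is
#
#   Ri_b(z) = g * (VPT(z) - VPT(z0)) * (z - z0)
#             / (VPT(z0) * ((u(z) - u0)**2 + (v(z) - v0)**2))
#
# with the surface winds u0, v0 taken as zero, while the gradient form is
#
#   Ri_g = N**2 / ((du/dz)**2 + (dv/dz)**2)
#
# with N**2 the squared Brunt-Vaisala frequency from wrap_xr_metpy_brunt_vaisala_f2.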
#
#def calculate_bulk_richardson_from_physical_radiosonde(VPT, U, V, g=9.79474,
# initial_height_pos=0):
# import numpy as np
# z = VPT['Height'] # in meters
# z0 = VPT['Height'].isel(Height=initial_height_pos)
# U0 = U.isel(Height=int(initial_height_pos))
# V0 = V.isel(Height=int(initial_height_pos))
# VPT_0 = VPT.isel(Height=int(initial_height_pos))
# VPT_mean = VPT.cumsum('Height') / (np.arange(VPT.Height.size) + 1)
## U.loc[dict(Height=35)]=0
## V.loc[dict(Height=35)]=0
# U2 = (U-U0)**2.0
# V2 = (V-V0)**2.0
## WS2 = (WS * 0.51444445)**2
## Rib_values = g * (VPT - VPT_0) * (z) / ((VPT_mean) * (U2 + V2))
# Rib_values = g * (VPT - VPT_0) * (z - z0) / ((VPT_0) * (U2 + V2))
# Rib = VPT.copy(data=Rib_values)
# Rib.name = 'Rib'
# Rib.attrs.update(long_name='Bulk Richardson Number')
# Rib.attrs.update(units='dimensionless')
# return Rib
#def calculate_gradient_richardson_from_physical_radiosonde(BVF2, U, V):
# dU = U.differentiate('Height')
# dV = V.differentiate('Height')
# Rig = BVF2 / (dU**2 + dV**2)
# Rig.name = 'Rig'
# Rig.attrs.update(long_name='Gradient Richardson Number')
# Rig.attrs.update(units='dimensionless')
# return Rig
def find_MLH_from_2s_WW2014(ds, alt_cutoff=None, eps=50,
return_found_df=False, plot=False):
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from aux_gps import smooth_xr
import xarray as xr
dsize = len(ds.dims)
if dsize != 1:
        raise ValueError('ds has to be 1D!')
if 'PT' not in ds:
T_k = ds['T'] + 273.15
T_k.attrs['units'] = 'K'
ds['PT'] = wrap_xr_metpy_potential_temperature(ds['P'], T_k)
if 'N' not in ds:
ds['N'] = calculate_atmospheric_refractivity(ds['P'], ds['T'], ds['RH'])
dt = pd.to_datetime(ds['sound_time'].item())
dss = ds[['PT', 'RH', 'MR', 'N']].reset_coords(drop=True)
df_plot = dss.to_dataframe()
df_plot['Height'] = ds['Height'].values
df_plot = df_plot.dropna().set_index('Height')
    if alt_cutoff is not None:
        dss = dss.where(ds['Height'] <= alt_cutoff, drop=True)
        H = ds['Height'].where(ds['Height'] <= alt_cutoff, drop=True)
    else:
        H = ds['Height']
    dss['time'] = H
dss = dss.rename(time='Height')
# nearest_cutoff = ds['Height'].dropna('time').sel(Height=alt_cutoff, method='nearest')
# dss = dss.sel(Height=slice(None, nearest_cutoff))
dss = dss.differentiate('Height')
dss = smooth_xr(dss, 'Height')
df = dss.to_dataframe().dropna()
ind = np.arange(1, 11)
pt10 = df['PT'].nlargest(n=10).index.values
n10 = df['N'].nsmallest(n=10).index.values
rh10 = df['RH'].nsmallest(n=10).index.values
mr10 = df['MR'].nsmallest(n=10).index.values
ten = pd.DataFrame([pt10, n10, rh10, mr10]).T
ten.columns = ['PT', 'N', 'RH', 'MR']
ten.index = ind
found = []
for i, vc_df in ten.iterrows():
row_sorted = vc_df.sort_values()
diff_3_1 = row_sorted[3] - row_sorted[1]
diff_2_0 = row_sorted[2] - row_sorted[0]
if (diff_3_1 <= eps) and (diff_2_0 <= eps):
found.append(row_sorted)
elif diff_3_1 <= eps:
found.append(row_sorted[1:4])
elif diff_2_0 <= eps:
found.append(row_sorted[0:3])
# mlh_0 = vc_df.value_counts()[vc_df.value_counts() > 2]
# if mlh_0.empty:
# continue
# else:
# mlh = mlh_0.index.item()
# return mlh
if not found:
print('MLH Not found for {} using W&W!'.format(dt))
return None
found_df = pd.concat(found, axis=1).T
mlh_mean = found_df.iloc[0].mean()
if return_found_df:
return found_df
if plot:
# df.plot(subplots=True)
fig, ax = plt.subplots(1, 4, figsize=(20, 16))
df_plot.loc[0: 1200, 'PT'].reset_index().plot.line(y='Height', x='PT', ax=ax[0], legend=False)
df_plot.loc[0: 1200, 'RH'].reset_index().plot.line(y='Height', x='RH', ax=ax[1], legend=False)
df_plot.loc[0: 1200, 'MR'].reset_index().plot.line(y='Height', x='MR', ax=ax[2], legend=False)
df_plot.loc[0: 1200, 'N'].reset_index().plot.line(y='Height', x='N', ax=ax[3], legend=False)
[x.grid() for x in ax]
[ax[0].axhline(y=mlh, color='r', linestyle='--') for mlh in found_df['PT'].dropna()]
[ax[1].axhline(y=mlh, color='r', linestyle='--') for mlh in found_df['RH'].dropna()]
[ax[2].axhline(y=mlh, color='r', linestyle='--') for mlh in found_df['MR'].dropna()]
[ax[3].axhline(y=mlh, color='r', linestyle='--') for mlh in found_df['N'].dropna()]
[axx.axhline(y=mlh_mean,color='k', linestyle='-', linewidth=1.5) for axx in ax]
[axx.set_ylim(0, 1200) for axx in ax]
[axx.autoscale(enable=True, axis='x', tight=True) for axx in ax]
return xr.DataArray(mlh_mean)
def find_MLH_from_2s_richardson(ds, crit=0.25, method='bulk'):
import numpy as np
ri_dict = {'bulk': 'rib', 'grad': 'rig'}
ri_name = ri_dict.get(method)
if ri_name not in ds:
ds[ri_name] = calculate_richardson_from_2s_radiosonde(
ds, method=method)
# if ri_name == 'rig':
# ds[ri_name] = smooth_xr(ds[ri_name])
indLeft = np.searchsorted(ds[ri_name], crit, side='left')
indRight = np.searchsorted(ds[ri_name], crit, side='right')
    ind = indLeft
    last_ind = ds[ri_name].size - 1
    if indLeft == indRight:
        if (ind < last_ind) and (ind > 0):
            assert ds[ri_name][ind - 1] < crit
            mlh = ds['Height'][ind - 1: ind + 1].mean()
        else:
            return None
    else:
        if (ind < last_ind) and (ind > 0):
            mlh = ds['Height'][indLeft]
        else:
            return None
# mlh_time = np.abs(ds[ri_name] - crit).idxmin('time')
# mlh = ds['Height'].sel(time=mlh_time)
mlh.name = 'MLH'
mlh.attrs['long_name'] = 'Mixing Layer Height'
return mlh
#def calculate_MLH_from_Rib_single_profile(Rib_df, crit=0.25):
# import numpy as np
# # drop first row:
# # df = Rib_df.drop(35)
## # get index position of first closest to crit:
## i = df['Rib'].sub(crit).abs().argmin() + 1
## df_c = df.iloc[i-2:i+2]
# indLeft = np.searchsorted(Rib_df['Rib'], crit, side='left')
# indRight = np.searchsorted(Rib_df['Rib'], crit, side='right')
# if indLeft == indRight:
# ind = indLeft
# else:
# ind = indLeft
# mlh = Rib_df.index[ind]
# # mlh = df['Rib'].sub(crit).abs().idxmin()
# return mlh
#def calculate_Rib_MLH_all_profiles(sound_path=sound_path, crit=0.25, hour=12,
# data_type='phys', savepath=None):
# from aux_gps import save_ncfile
# import xarray as xr
# from PW_from_gps_figures import plot_seasonal_histogram
# if data_type == 'phys':
# bd = xr.load_dataset(sound_path /
# 'bet_dagan_phys_sounding_2007-2019.nc')
# pos = 0
# elif data_type == 'edt':
# bd = xr.load_dataset(sound_path /
# 'bet_dagan_edt_sounding_2016-2019.nc')
# pos = 2
# Rib = calculate_bulk_richardson_from_physical_radiosonde(bd['VPT'], bd['U'],
# bd['V'], g=9.79474,
# initial_height_pos=pos)
# mlh = calculate_MLH_time_series_from_all_profiles(Rib, crit=crit,
# hour=hour, plot=False)
# plot_seasonal_histogram(mlh, dim='sound_time', xlim=(-100, 3000),
# xlabel='MLH [m]',
# suptitle='MLH histogram using Rib method')
# if savepath is not None:
# filename = 'MLH_Rib_{}_{}_{}.nc'.format(
# str(crit).replace('.', 'p'), data_type, hour)
# save_ncfile(mlh, sound_path, filename)
# return mlh
#def calculate_MLH_time_series_from_all_profiles(Rib, crit=0.25, hour=12,
# dim='sound_time', plot=True):
# from aux_gps import keep_iqr
# import matplotlib.pyplot as plt
# import xarray as xr
# rib = Rib.sel(sound_time=Rib['sound_time.hour'] == hour)
# mlhs = []
# for time in rib[dim]:
# # print('proccessing MLH retreival of {} using Rib at {}'.format(
# # time.dt.strftime('%Y-%m-%d:%H').item(), crit))
# df = rib.sel({dim: time}).reset_coords(drop=True).to_dataframe()
# mlhs.append(calculate_MLH_from_Rib_single_profile(df, crit=crit))
# da = xr.DataArray(mlhs, dims=[dim])
# da[dim] = rib[dim]
# da.name = 'MLH'
# da.attrs['long_name'] = 'Mixing layer height'
# da.attrs['units'] = 'm'
# da.attrs['method'] = 'Rib@{}'.format(crit)
# if plot:
# da = keep_iqr(da, dim)
# fig, ax = plt.subplots(figsize=(15, 6))
# ln = da.plot(ax=ax)
# ln200 = da.where(da >= 200).plot(ax=ax)
# lnmm = da.where(da > 200).resample(
# {dim: 'MS'}).mean().plot(ax=ax, linewidth=3, color='r')
# ax.legend(ln + ln200 + lnmm,
# ['MLH', 'MLH above 200m', 'MLH above 200m monthly means'])
# ax.grid()
# ax.set_ylabel('MLH from Rib [m]')
# ax.set_xlabel('')
# fig.tight_layout()
# return da
#def solve_MLH_with_all_crits(RiB, mlh_all=None, hour=12, cutoff=200,
# plot=True):
# import xarray as xr
# import matplotlib.pyplot as plt
# import seaborn as sns
# from aux_gps import keep_iqr
# if mlh_all is None:
# mlhs = []
# crits = [0.25, 0.33, 0.5, 0.75, 1.0]
# for crit in crits:
# print('solving mlh for {} critical RiB value.'.format(crit))
# mlh = calculate_MLH_time_series_from_all_profiles(RiB, crit=crit,
# hour=hour,
# plot=False)
# mlh = keep_iqr(mlh, dim='sound_time')
# mlhs.append(mlh)
#
# mlh_all = xr.concat(mlhs, 'crit')
# mlh_all['crit'] = crits
# if cutoff is not None:
# mlh_all = mlh_all.where(mlh_all >= cutoff)
# if plot:
# cmap = sns.color_palette("colorblind", 5)
# fig, ax = plt.subplots(3, 1, sharex=True, figsize=(12, 9))
# df_mean = mlh_all.groupby('sound_time.month').mean().to_dataset('crit').to_dataframe()
# df_mean.plot(color=cmap, style=['-+', '-.', '-', '--', '-.'], ax=ax[0])
# ax[0].grid()
# ax[0].set_ylabel('Mean MLH [m]')
# ax[0].set_title(
# 'Annual mixing layer height from Bet-Dagan radiosonde profiles ({}Z) using RiB method'.format(hour))
# df_std = mlh_all.groupby('sound_time.month').std().to_dataset('crit').to_dataframe()
# df_std.plot(color=cmap, style=['-+', '-.', '-', '--', '-.'], ax=ax[1])
# ax[1].grid()
# ax[1].set_ylabel('Std MLH [m]')
# df_count = mlh_all.groupby('sound_time.month').count().to_dataset('crit').to_dataframe()
# df_count.plot(color=cmap, style=['-+', '-.', '-', '--', '-.'], ax=ax[2])
# ax[2].grid()
# ax[2].set_ylabel('Count MLH [#]')
# fig.tight_layout()
# return mlh_all
def scatter_plot_MLH_PWV(ds, season='JJA', crit=0.25):
import matplotlib.pyplot as plt
if season is not None:
ds = ds.sel(sound_time=ds['sound_time.season'] == season)
ds = ds.sel(crit=crit)
hour = list(set(ds['sound_time'].dt.hour.values))[0]
days = ds['PWV_MLH'].dropna('sound_time').size
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(ds['PWV_MLH'], ds['MLH'], alpha=0.75, marker='o')
ax.scatter(ds['PWV_max']-ds['PWV_MLH'], ds['MLH'], alpha=0.75, marker='s')
ax.grid()
ax.set_xlabel('PWV [mm]')
ax.set_ylabel('MLH [m]')
ax.set_title(
        'MLH from Bet-Dagan profiles (RiB={},{}Z) in {} vs. PWV ({} days)'.format(
crit, hour,
season, days))
ax.legend(['PWV below MLH', 'PWV above MLH'])
return fig
def process_all_MLH_with_PWV(MLH_all, PWV):
import xarray as xr
mlhs = []
for crit in MLH_all.crit:
        print('processing mlh-pwv for {} critical RiB value.'.format(crit.item()))
ds = return_PWV_with_MLH_values(PWV, MLH_all.sel(crit=crit))
mlhs.append(ds)
ds = xr.concat(mlhs, 'crit')
ds['crit'] = MLH_all.crit
return ds
def return_PWV_with_MLH_values(PW, MLH, dim='sound_time'):
import xarray as xr
pws = []
pw_max = []
MLH = MLH.dropna(dim)
for time in MLH[dim]:
pws.append(PW.sel({dim: time}).sel(Height=MLH.sel({dim: time})))
pw_max.append(PW.sel({dim: time}).max())
pw_da = xr.concat(pws, dim)
pw_da_max = xr.concat(pw_max, dim)
ds = xr.Dataset()
ds['PWV_MLH'] = pw_da
ds['PWV_max'] = pw_da_max
ds['MLH'] = MLH
return ds
def wrap_xr_metpy_brunt_vaisala_f2(Height, PT, axis=0, verbose=False):
from metpy.calc import brunt_vaisala_frequency_squared
from metpy.units import units
try:
PT_unit = PT.attrs['units']
assert PT_unit == 'K'
except KeyError:
PT_unit = 'K'
if verbose:
print('assuming potential temperature units are degree kelvin...')
PT_values = PT.values * units('kelvin')
try:
H_unit = Height.attrs['units']
assert H_unit == 'm'
except KeyError:
H_unit = 'm'
if verbose:
print('assuming Height units are m...')
H_values = Height.values * units('m')
bvf2 = brunt_vaisala_frequency_squared(H_values, PT_values, axis=axis)
da = PT.copy(data=bvf2.magnitude)
da.name = 'BVF2'
da.attrs['units'] = '1/sec**2'
da.attrs['long_name'] = 'Brunt-Vaisala Frequency squared'
return da
def wrap_xr_metpy_virtual_temperature(T, MR, verbose=False):
from metpy.calc import virtual_temperature
from metpy.units import units
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degree celsius...')
# convert to Kelvin:
T_values = T.values + 273.15
T_values = T_values * units('K')
try:
MR_unit = MR.attrs['units']
assert MR_unit == 'kg/kg'
except KeyError:
MR_unit = 'kg/kg'
if verbose:
            print('assuming mixing ratio units are kg/kg...')
MR_values = MR.values * units(MR_unit)
Theta = virtual_temperature(T_values, MR_values)
da = MR.copy(data=Theta.magnitude) #/ 1000 # fixing for g/kg
da.name = 'VPT'
da.attrs['units'] = 'K'
da.attrs['long_name'] = 'Virtual Potential Temperature'
return da
def wrap_xr_metpy_virtual_potential_temperature(P, T, MR, verbose=False):
from metpy.calc import virtual_potential_temperature
from metpy.units import units
try:
P_unit = P.attrs['units']
assert P_unit == 'hPa'
except KeyError:
P_unit = 'hPa'
if verbose:
print('assuming pressure units are hpa...')
P_values = P.values * units(P_unit)
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degree celsius...')
# convert to Kelvin:
T_values = T.values + 273.15
T_values = T_values * units('K')
try:
MR_unit = MR.attrs['units']
assert MR_unit == 'kg/kg'
except KeyError:
MR_unit = 'kg/kg'
if verbose:
            print('assuming mixing ratio units are kg/kg...')
MR_values = MR.values * units(MR_unit)
Theta = virtual_potential_temperature(P_values, T_values, MR_values)
da = P.copy(data=Theta.magnitude)# / 1000 # fixing for g/kg
da.name = 'VPT'
da.attrs['units'] = 'K'
da.attrs['long_name'] = 'Virtual Potential Temperature'
return da
def wrap_xr_metpy_potential_temperature(P, T, verbose=False):
from metpy.calc import potential_temperature
from metpy.calc import exner_function
from metpy.units import units
try:
P_unit = P.attrs['units']
assert P_unit == 'hPa'
except KeyError:
P_unit = 'hPa'
if verbose:
print('assuming pressure units are hpa...')
P_values = P.values * units(P_unit)
# try:
# T_unit = T.attrs['units']
# assert T_unit == 'degC'
# except KeyError:
# T_unit = 'degC'
# if verbose:
# print('assuming temperature units are degree celsius...')
# convert to Kelvin:
# T_values = T.values + 273.15
# T_values = T_values * units('K')
# Theta = potential_temperature(P_values, T)
Theta = T / exner_function(P_values)
da = P.copy(data=Theta.values)
da.name = 'PT'
da.attrs['units'] = T.attrs['units']
da.attrs['long_name'] = 'Potential Temperature'
return da
def wrap_xr_metpy_vapor_pressure(P, MR, verbose=False):
from metpy.calc import vapor_pressure
from metpy.units import units
try:
P_unit = P.attrs['units']
assert P_unit == 'hPa'
except KeyError:
P_unit = 'hPa'
if verbose:
print('assuming pressure units are hPa...')
try:
MR_unit = MR.attrs['units']
assert MR_unit == 'kg/kg'
except KeyError:
MR_unit = 'kg/kg'
if verbose:
print('assuming mixing ratio units are kg/kg...')
P_values = P.values * units(P_unit)
MR_values = MR.values * units(MR_unit)
VP = vapor_pressure(P_values, MR_values)
da = P.copy(data=VP.magnitude)
da.attrs['units'] = P_unit
da.attrs['long_name'] = 'Water vapor partial pressure'
return da
def wrap_xr_metpy_mixing_ratio(P, T, RH, verbose=False):
from metpy.calc import mixing_ratio_from_relative_humidity
import numpy as np
from metpy.units import units
if np.max(RH) > 1.2:
RH_values = RH.values / 100.0
else:
RH_values = RH.values * units('dimensionless')
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degC...')
T.attrs['units'] = T_unit
try:
P_unit = P.attrs['units']
assert P_unit == 'hPa'
except KeyError:
P_unit = 'hPa'
if verbose:
print('assuming pressure units are hPa...')
T_values = T.values * units(T_unit)
P_values = P.values * units(P_unit)
mixing_ratio = mixing_ratio_from_relative_humidity(
RH_values, T_values, P_values)
da = T.copy(data=mixing_ratio.magnitude)
da.name = 'MR'
da.attrs['units'] = 'kg/kg'
da.attrs['long_name'] = 'Water vapor mass mixing ratio'
return da
def wrap_xr_metpy_density(P, T, MR, verbose=False):
from metpy.calc import density
from metpy.units import units
try:
MR_unit = MR.attrs['units']
except KeyError:
MR_unit = 'g/kg'
if verbose:
print('assuming mixing ratio units are g/kg...')
MR.attrs['units'] = MR_unit
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degC...')
T.attrs['units'] = T_unit
try:
P_unit = P.attrs['units']
assert P_unit == 'hPa'
except KeyError:
P_unit = 'hPa'
if verbose:
print('assuming pressure units are hPa...')
T_values = T.values * units(T_unit)
P_values = P.values * units(P_unit)
MR_values = MR.values * units(MR_unit)
Rho = density(P_values, T_values, MR_values)
Rho = Rho.to('g/m^3')
da = P.copy(data=Rho.magnitude)
da.attrs['units'] = 'g/m^3'
da.attrs['long_name'] = 'Air density'
return da
def wrap_xr_metpy_dewpoint(T, RH, verbose=False):
import numpy as np
from metpy.calc import dewpoint_from_relative_humidity
from metpy.units import units
if np.max(RH) > 1.2:
RH_values = RH.values / 100.0
else:
RH_values = RH.values
try:
T_unit = T.attrs['units']
assert T_unit == 'degC'
except KeyError:
T_unit = 'degC'
if verbose:
print('assuming temperature units are degC...')
T.attrs['units'] = T_unit
T_values = T.values * units(T_unit)
dewpoint = dewpoint_from_relative_humidity(T_values, RH_values)
da = T.copy(data=dewpoint.magnitude)
da.attrs['units'] = T_unit
da.attrs['long_name'] = 'Dew point'
return da
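# The wrap_xr_metpy_* helpers above share one pattern: read the DataArray's 'units'
# attribute (falling back to an assumed default), attach pint units, call the metpy
# routine and wrap the magnitude back into a labelled DataArray. A minimal usage
# sketch with synthetic values (the numbers and the 'Height' dimension are made up
# purely for illustration):
def _example_wrap_dewpoint_usage():
    import numpy as np
    import xarray as xr
    T = xr.DataArray(np.array([25.0, 18.0, 10.0]), dims=['Height'], attrs={'units': 'degC'})
    RH = xr.DataArray(np.array([55.0, 70.0, 85.0]), dims=['Height'], attrs={'units': '%'})
    # returns a DataArray of dew points in degC on the same Height coordinate
    return wrap_xr_metpy_dewpoint(T, RH, verbose=True)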
class Constants:
def __init__(self):
import astropy.units as u
# Specific gas const for water vapour, J kg^{-1} K^{-1}:
self.Rs_v = 461.52 * u.joule / (u.kilogram * u.Kelvin)
# Specific gas const for dry air, J kg^{-1} K^{-1}:
self.Rs_da = 287.05 * u.joule / (u.kilogram * u.Kelvin)
self.MW_dry_air = 28.9647 * u.gram / u.mol # gr/mol
self.MW_water = 18.015 * u.gram / u.mol # gr/mol
self.Water_Density = 1000.0 * u.kilogram / u.m**3
self.Epsilon = self.MW_water / self.MW_dry_air # Epsilon=Rs_da/Rs_v;
def show(self):
from termcolor import colored
for attr, value in vars(self).items():
print(colored('{} : '.format(attr), color='blue', attrs=['bold']), end='')
print(colored('{:.2f}'.format(value), color='white', attrs=['bold']))
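# Sanity check of the constants above: Epsilon = MW_water / MW_dry_air
# = 18.015 / 28.9647 ~= 0.622 (dimensionless), the usual ratio that appears in the
# mixing-ratio <-> vapour-pressure conversions used below.
def _example_constants_epsilon():
    C = Constants()
    # decompose() cancels the gram/mol units and leaves the bare ratio ~0.622
    return C.Epsilon.decompose()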
class WaterVaporVar:
def __init__(self, name, value, unit):
try:
setattr(self, name, value.values)
except AttributeError:
setattr(self, name, value)
if value is not None:
value = getattr(self, name)
setattr(self, name, value * unit)
class WaterVapor:
def __init__(self,
Z=None,
P=None,
T=None,
DWPT=None,
MR=None,
Q=None,
RH=None,
PPMV=None,
MD=None,
KMOL=None,
ND=None,
PP=None,
verbose=True):
import astropy.units as u
self.verbose = verbose
self.Z = WaterVaporVar('Z', Z, u.meter).Z # height in meters
self.P = WaterVaporVar('P', P, u.hPa).P # pressure in hPa
self.T = WaterVaporVar('T', T, u.deg_C).T # temperature in deg_C
self.DWPT = WaterVaporVar('DWPT', DWPT, u.deg_C).DWPT # dew_point in deg_C
self.MR = WaterVaporVar('MR', MR, u.gram / u.kilogram).MR # mass_mixing_ratio in gr/kg
self.Q = WaterVaporVar('Q', Q, u.gram / u.kilogram).Q # specific humidity in gr/kg
self.RH = WaterVaporVar('RH', RH, u.percent).RH # relative humidity in %
self.PPMV = WaterVaporVar('PPMV', PPMV, u.cds.ppm).PPMV # volume_mixing_ratio in ppm
self.MD = WaterVaporVar('MD', MD, u.gram / u.meter**3).MD # water vapor density in gr/m^3
self.KMOL = WaterVaporVar('KMOL', KMOL, u.kilomole / u.cm**2).KMOL # water vapor column density in kmol/cm^2
self.ND = WaterVaporVar('ND', ND, u.dimensionless_unscaled / u.m**3).ND # number density in molecules / m^3
self.PP = WaterVaporVar('PP', PP, u.hPa).PP # water vapor partial pressure in hPa
# update attrs from dict containing keys as attrs and vals as attrs vals
# to be updated
def from_dict(self, d):
self.__dict__.update(d)
return self
def show(self, name='all'):
from termcolor import colored
if name == 'all':
for attr, value in vars(self).items():
print(colored('{} : '.format(attr), color='blue', attrs=['bold']), end='')
print(colored(value, color='white', attrs=['bold']))
elif hasattr(self, name):
print(colored('{} : '.format(name), color='blue', attrs=['bold']), end='')
            print(colored(getattr(self, name), color='white', attrs=['bold']))
def convert(self, from_to='PP_to_MR'):
import astropy.units as u
C = Constants()
from_name = from_to.split('_')[0]
to_name = from_to.split('_')[-1]
from_ = getattr(self, from_name)
to_ = getattr(self, to_name)
print('converting {} to {}:'.format(from_name, to_name))
if to_ is not None:
if self.verbose:
print('{} already exists, overwriting...'.format(to_name))
if from_ is not None:
if from_name == 'PP' and to_name == 'MR':
# convert wv partial pressure to mass mixing ratio:
if self.P is None:
raise Exception('total pressure is needed for this conversion')
                # NOTE: self.MR may still be None here, so use its nominal unit (g/kg) directly:
                self.MR = C.Epsilon * self.PP / (self.P - self.PP) / (u.gram / u.kilogram).decompose()
return self.MR
elif from_name == 'MR' and to_name == 'PP':
# convert mass mixing ratio to wv partial pressure:
if self.P is None:
raise Exception('total pressure is needed for this conversion')
e_tag = self.MR * self.MR.unit.decompose() / (C.Epsilon)
self.PP = self.P * e_tag / (1. * e_tag.unit + e_tag)
return self.PP
else:
            raise Exception('{} is needed to perform conversion'.format(from_name))
return self
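# Usage sketch for WaterVapor.convert with synthetic scalar values (chosen only to
# illustrate the call pattern): the MR -> PP direction solves MR = Epsilon * e / (P - e)
# for the partial pressure e, given the total pressure P.
def _example_watervapor_convert():
    wv = WaterVapor(P=1000.0, MR=9.5, verbose=False)  # total pressure [hPa], mixing ratio [g/kg]
    pp = wv.convert(from_to='MR_to_PP')               # water-vapour partial pressure [hPa]
    return pp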
def check_sound_time_datetime(dt):
import pandas as pd
if dt.hour >= 21 and dt.hour <= 23:
sound_time = (
dt +
pd.Timedelta(
1,
unit='d')).replace(
hour=0,
minute=0,
second=0)
elif dt.hour >= 0 and dt.hour <= 2:
sound_time = dt.replace(hour=0, minute=0, second=0)
elif dt.hour >= 9 and dt.hour <= 14:
sound_time = dt.replace(hour=12, minute=0, second=0)
else:
raise ValueError('{} time is not midnight nor noon'.format(dt))
return sound_time
def check_sound_time(df):
import pandas as pd
# check for validity of radiosonde air time, needs to be
# in noon or midnight:
if not df.between_time('22:00', '02:00').empty:
if df.index[0].hour <= 23 and df.index[0].hour >= 21:
sound_time = pd.to_datetime((df.index[0] + pd.Timedelta(1, unit='d')).strftime('%Y-%m-%d')).replace(hour=0, minute=0)
else:
sound_time = pd.to_datetime(df.index[0].strftime('%Y-%m-%d')).replace(hour=0, minute=0)
elif not df.between_time('10:00', '14:00').empty:
sound_time = pd.to_datetime(
df.index[0].strftime('%Y-%m-%d')).replace(hour=12, minute=0)
elif (df.between_time('22:00', '02:00').empty and
df.between_time('10:00', '14:00').empty):
raise ValueError(
'{} time is not midnight nor noon'.format(
df.index[0]))
return sound_time
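# Example of the launch-time -> nominal sounding-time convention implemented by the two
# functions above (hypothetical timestamps): a 23:21 launch is filed under 00:00 of the
# next day, while an 11:32 launch is filed under 12:00 of the same day.
def _example_check_sound_time_datetime():
    import pandas as pd
    midnight = check_sound_time_datetime(pd.Timestamp('2018-06-03 23:21'))  # -> 2018-06-04 00:00
    noon = check_sound_time_datetime(pd.Timestamp('2018-06-03 11:32'))      # -> 2018-06-03 12:00
    return midnight, noon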
#def calculate_tm_edt(wvpress, tempc, height, mixratio,
# press, method='mr_trapz'):
# def calculate_tm_with_method(method='mr_trapz'):
# import numpy as np
# import pandas as pd
# nonlocal wvpress, tempc, height, mixratio, press
# # conver Mixing ratio to WV-PP(e):
# MW_dry_air = 28.9647 # gr/mol
# MW_water = 18.015 # gr/mol
# Epsilon = MW_water / MW_dry_air # Epsilon=Rs_da/Rs_v;
# mr = pd.Series(mixratio)
# p = pd.Series(press)
# eps_tag = mr / (1000.0 * Epsilon)
# e = p * eps_tag / (1.0 + eps_tag)
# try:
# # directly use WV-PP with trapz:
# if method == 'wv_trapz':
# numerator = np.trapz(
# wvpress /
# (tempc +
# 273.15),
# height)
# denominator = np.trapz(
# wvpress / (tempc + 273.15)**2.0, height)
# tm = numerator / denominator
# # use WV-PP from mixing ratio with trapz:
# elif method == 'mr_trapz':
# numerator = np.trapz(
# e /
# (tempc +
# 273.15),
# height)
# denominator = np.trapz(
# e / (tempc + 273.15)**2.0, height)
# tm = numerator / denominator
# # use WV-PP from mixing ratio with sum:
# elif method == 'mr_sum':
# e_ser = pd.Series(e)
# height = pd.Series(height)
# e_sum = (e_ser.shift(-1) + e_ser).dropna()
# h = height.diff(-1).abs()
# numerator = 0.5 * (e_sum * h / (pd.Series(tempc) + 273.15)).sum()
# denominator = 0.5 * \
# (e_sum * h / (pd.Series(tempc) + 273.15)**2.0).sum()
# tm = numerator / denominator
# except ValueError:
# return np.nan
# return tm
# if method is None:
# tm_wv_trapz = calculate_tm_with_method('wv_trapz')
# tm_mr_trapz = calculate_tm_with_method('mr_trapz')
# tm_sum = calculate_tm_with_method('sum')
# else:
# tm = calculate_tm_with_method(method)
# chosen_method = method
# return tm, chosen_method
#
#
#def calculate_tpw_edt(mixratio, rho, rho_wv, height, press, g, method='trapz'):
# def calculate_tpw_with_method(method='trapz'):
# nonlocal mixratio, rho, rho_wv, height, press, g
# import numpy as np
# import pandas as pd
# # calculate specific humidity q:
# q = (mixratio / 1000.0) / \
# (1 + 0.001 * mixratio / 1000.0)
# try:
# if method == 'trapz':
# tpw = np.trapz(q * rho,
# height)
# elif method == 'sum':
# rho_wv = pd.Series(rho_wv)
# height = pd.Series(height)
# rho_sum = (rho_wv.shift(-1) + rho_wv).dropna()
# h = height.diff(-1).abs()
# tpw = 0.5 * (rho_sum * h).sum()
# elif method == 'psum':
# q = pd.Series(q)
# q_sum = q.shift(-1) + q
# p = pd.Series(press) * 100.0 # to Pa
# dp = p.diff(-1).abs()
# tpw = (q_sum * dp / (2.0 * 1000 * g)).sum() * 1000.0
# except ValueError:
# return np.nan
# return tpw
# if method is None:
# # calculate all the methods and choose trapz if all agree:
# tpw_trapz = calculate_tpw_with_method(method='trapz')
# tpw_sum = calculate_tpw_with_method(method='sum')
# tpw_psum = calculate_tpw_with_method(method='psum')
# else:
# tpw = calculate_tpw_with_method(method=method)
# chosen_method = method
# return tpw, chosen_method
def read_all_deserve_soundings(path=des_path, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
radio_path = path / 'radiosonde'
files = path_glob(radio_path, '*.txt')
mas_files = [x for x in files if 'MAS' in x.as_posix()]
maz_files = [x for x in files if 'MAZ' in x.as_posix()]
ds_list = []
for file in mas_files:
ds_list.append(read_one_deserve_record(file))
ds_mas = xr.concat(ds_list, 'sound_time')
ds_mas = ds_mas.sortby('sound_time')
ds_list = []
for file in maz_files:
ds_list.append(read_one_deserve_record(file))
ds_maz = xr.concat(ds_list, 'sound_time')
ds_maz = ds_maz.sortby('sound_time')
if savepath is not None:
filename = 'deserve_massada_sounding_2014-2014.nc'
save_ncfile(ds_mas, savepath, filename)
filename = 'deserve_mazzra_sounding_2014-2014.nc'
save_ncfile(ds_maz, savepath, filename)
return ds_mas, ds_maz
def get_loc_sound_time_from_deserve_filepath(filepath):
import pandas as pd
txt_filepath = filepath.as_posix().split('/')[-1]
loc = txt_filepath.split('_')[0]
sound_time = txt_filepath.split('_')[1]
sound_time = pd.to_datetime(sound_time, format='%Y%m%d%H')
return loc, sound_time
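# The DESERVE file names are assumed to follow a "<LOC>_<YYYYMMDDHH>_..." pattern, so a
# (hypothetical) file such as 'MAS_2014081512_profile.txt' parses to loc='MAS' and
# sound_time=2014-08-15 12:00:
def _example_deserve_filename_parsing():
    from pathlib import Path
    return get_loc_sound_time_from_deserve_filepath(
        Path('/tmp/radiosonde/MAS_2014081512_profile.txt'))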
def read_one_deserve_record(filepath):
import pandas as pd
loc, sound_time = get_loc_sound_time_from_deserve_filepath(filepath)
df = pd.read_csv(filepath, header=None, skiprows=1, delim_whitespace=True)
df.columns = [
'time',
'P',
'T',
'RH',
'WS',
'WD',
'lon',
'lat',
'alt',
'Dewpt',
'vi_te']
units = [
'mm:ss',
'hPa',
'degC',
'%',
'm/s',
'deg',
'deg',
'deg',
'm',
'degC',
'degC']
units_dict = dict(zip(df.columns, units))
df['time'] = '00:' + df['time'].astype(str)
    df['time'] = pd.to_timedelta(df['time'], errors='coerce', unit='sec')
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
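# The DataframeType tests below pass targets such as "--r1" together with
# column_prefix_map={"--": "va"}: the operator is expected to substitute the prefix before
# looking the column up, so "--r1" resolves to "var1". A minimal sketch of that resolution
# (hypothetical helper, not the library code under test):
def _resolve_prefixed_target(target, column_prefix_map=None):
    for prefix, replacement in (column_prefix_map or {}).items():
        if target.startswith(prefix):
            return target.replace(prefix, replacement, 1)
    return target  # _resolve_prefixed_target("--r1", {"--": "va"}) == "var1"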
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
                        .equals(pandas.Series([True, True, True, True, True])))
"""
This script is used to pre-process the dataset in our center.
1. Transform the .mat files to one .npy file
2. Give labels to each subject and concatenate them as the first column
3. Randomly split the whole data into training and validation sets
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import numpy as np
import pandas as pd
import os
from eslearn.utils.lc_read_write_Mat import read_mat
# Inputs
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC550' # all mat files directory
scale = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx' # whole scale path
uid_unmedicated_and_firstepisode = r'D:\WorkStation_2018\SZ_classification\Scale\uid_unmedicated_and_firstepisode.txt'
uid_sz_chronic_drugtaking_morethan6mon = r'D:\WorkStation_2018\SZ_classification\Scale\精分-非首发用药-病程大于6月.txt'
n_node = 246 # number of nodes in the mat network
#%% Transform the .mat files to one .npy file
allmatpath = os.listdir(matroot)
allmatpath = [os.path.join(matroot, matpath) for matpath in allmatpath]
mask = np.triu(np.ones(n_node),1)==1
allmat = [read_mat(matpath)[mask].T for matpath in allmatpath]
allmat = pd.DataFrame(np.float32(allmat))
# Give labels to each subject, concatenate at the first column
uid = [os.path.basename(matpath) for matpath in allmatpath]
uid = pd.Series(uid)
uid = uid.str.findall('([1-9]\d*)')
uid = pd.DataFrame([int(i[0]) for i in uid])
scale = pd.read_excel(scale)
selected_diagnosis = pd.merge(uid, scale, left_on=0, right_on='folder', how='inner')[['folder','诊断']]
age_sex = pd.merge(uid, scale, left_on=0, right_on='folder', how='inner')[['folder', '诊断', '年龄','性别']]
# Giving large label to SZ
selected_diagnosis[selected_diagnosis==1] = 0
selected_diagnosis[selected_diagnosis==3] = 1
allmat_plus_label = pd.concat([selected_diagnosis, allmat],axis=1)
# print(allmat_plus_label)
#np.save(r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Data\ML_data_npy\dataset_550.npy',allmat_plus_label)
#%% Extract validation dataset that contains first episode unmedicated patients
# unmedicated
uid_unmedicated_and_firstepisode = pd.read_csv(uid_unmedicated_and_firstepisode, header=None)
data_unmedicated_and_firstepisode_550 = allmat_plus_label[allmat_plus_label['folder'].isin(uid_unmedicated_and_firstepisode[0])]
cov_unmedicated_and_firstepisode = age_sex[age_sex['folder'].isin(uid_unmedicated_and_firstepisode[0])]
# HC: matching hc and sz
from scipy.stats import ttest_ind
from eslearn.statistics.lc_chisqure import lc_chisqure
cov_hc_for_matching_unmedicated_and_firstepisode = age_sex[age_sex['诊断'] == 1]
np.random.seed(11)
idx_rand = np.random.permutation(len(cov_hc_for_matching_unmedicated_and_firstepisode))
cov_hc = cov_hc_for_matching_unmedicated_and_firstepisode.iloc[idx_rand[:len(cov_unmedicated_and_firstepisode)],:]
# Check if matching
ttest_ind(cov_unmedicated_and_firstepisode['年龄'], cov_hc['年龄'])
lc_chisqure([44, 44], [np.sum(cov_unmedicated_and_firstepisode['性别'] == 1), np.sum(cov_hc['性别'] == 1)])
# Get data and save
data_hc_for_matching_unmedicated_and_firstepisode_550 = allmat_plus_label[allmat_plus_label['folder'].isin(cov_hc['folder'])]
data_all = np.concatenate([data_unmedicated_and_firstepisode_550, data_hc_for_matching_unmedicated_and_firstepisode_550])
# np.save(r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_unmedicated_and_firstepisode_550.npy', data_all)
#%% Generate demographic table for Unmedicated and the matching HC
uid_unmedicated_file = r'D:\WorkStation_2018\SZ_classification\Scale\uid_unmedicated_and_firstepisode.txt'
uid_unmedicated = pd.read_csv(uid_unmedicated_file, header=None, dtype=np.int32)
uid_unmedicated_sz_hc = pd.concat([cov_hc['folder'], uid_unmedicated])
scale_unmedicated_hc = pd.merge(scale, uid_unmedicated_sz_hc, left_on='folder', right_on=0, how='inner')[['folder', '诊断', '年龄','性别', '病程月']]
des_unmedicated_hc = scale_unmedicated_hc.describe()
#%% Extract covariances for all: age and sex
cov = pd.merge(uid, scale, left_on=0, right_on='folder', how='inner')[['folder','诊断', '年龄', '性别']]
cov['诊断'] = selected_diagnosis['诊断']
cov['性别'] = np.int32(cov['性别'] == 2)
cov.columns = ['folder', 'diagnosis', 'age', 'sex']
cov.to_csv(r'D:\WorkStation_2018\SZ_classification\Scale\cov_550.txt', index=False)
#%% Extract covariances for unmedicated patients ans matched HC: age and sex
cov_unmedicated_sz_and_matched_hc = pd.merge(uid_unmedicated_sz_hc, cov, left_on=0, right_on='folder', how='inner')
import numpy as np
import pandas as pd
import hdf5storage
import h5py
from scipy.signal import find_peaks
import os
import sys
def set_size(width, fraction=1, subplots=(1, 1)):
"""Set figure dimensions to avoid scaling in LaTeX.
Parameters
----------
width: float or string
        Document width in points, or string of predefined document type
fraction: float, optional
Fraction of the width which you wish the figure to occupy
subplots: array-like, optional
The number of rows and columns of subplots.
Returns
-------
fig_dim: tuple
Dimensions of figure in inches
"""
if width == 'thesis':
width_pt = 426.79135
elif width == 'beamer':
width_pt = 307.28987
else:
width_pt = width
# Width of figure (in pts)
fig_width_pt = width_pt * fraction
# Convert from pt to inches
inches_per_pt = 1 / 72.27
# Golden ratio to set aesthetic figure height
# https://disq.us/p/2940ij3
golden_ratio = (5**.5 - 1) / 2
# Figure width in inches
fig_width_in = fig_width_pt * inches_per_pt
# Figure height in inches
fig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])
return (fig_width_in, fig_height_in)
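# Usage sketch (not part of the original module): shows how set_size feeds a
# matplotlib figsize. The 'thesis' width, fraction=0.5, the plotted values and the
# output filename are arbitrary illustrative choices.
def _set_size_example():
    import matplotlib.pyplot as plt  # imported lazily so the demo stays optional
    fig, ax = plt.subplots(figsize=set_size('thesis', fraction=0.5))
    ax.plot([0, 1, 2], [0, 1, 4])
    fig.savefig('set_size_demo.pdf', bbox_inches='tight')
    return fig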
def loadXML(path):
"""
    path should be the session folder containing the XML file
    Function returns:
    1. the number of channels
    2. the sampling frequency of the dat file or the eeg file, depending on which is present in the folder
       (the eeg file takes precedence if both are present)
    3. the mapping from shanks to channels as a dict
Args:
path : string
Returns:
int, int, dict
"""
if not os.path.exists(path):
print("The path "+path+" doesn't exist; Exiting ...")
sys.exit()
listdir = os.listdir(path)
xmlfiles = [f for f in listdir if f.endswith('.xml')]
if not len(xmlfiles):
print("Folder contains no xml files; Exiting ...")
sys.exit()
new_path = os.path.join(path, xmlfiles[0])
from xml.dom import minidom
xmldoc = minidom.parse(new_path)
nChannels = xmldoc.getElementsByTagName('acquisitionSystem')[0].getElementsByTagName('nChannels')[0].firstChild.data
fs_dat = xmldoc.getElementsByTagName('acquisitionSystem')[0].getElementsByTagName('samplingRate')[0].firstChild.data
fs = xmldoc.getElementsByTagName('fieldPotentials')[0].getElementsByTagName('lfpSamplingRate')[0].firstChild.data
shank_to_channel = {}
groups = xmldoc.getElementsByTagName('anatomicalDescription')[0].getElementsByTagName('channelGroups')[0].getElementsByTagName('group')
for i in range(len(groups)):
shank_to_channel[i] = np.sort([int(child.firstChild.data) for child in groups[i].getElementsByTagName('channel')])
return int(nChannels), int(fs), shank_to_channel
def loadLFP(path, n_channels=90, channel=64, frequency=1250.0, precision='int16'):
if type(channel) is not list:
f = open(path, 'rb')
startoffile = f.seek(0, 0)
endoffile = f.seek(0, 2)
bytes_size = 2
n_samples = int((endoffile-startoffile)/n_channels/bytes_size)
duration = n_samples/frequency
interval = 1/frequency
f.close()
with open(path, 'rb') as f:
data = np.fromfile(f, np.int16).reshape((n_samples, n_channels))[:,channel]
timestep = np.arange(0, len(data))/frequency
# check if lfp time stamps exist
lfp_ts_path = os.path.join(os.path.dirname(os.path.abspath(path)),'lfp_ts.npy')
if os.path.exists(lfp_ts_path):
timestep = np.load(lfp_ts_path).reshape(-1)
return data, timestep # nts.Tsd(timestep, data, time_units = 's')
elif type(channel) is list:
f = open(path, 'rb')
startoffile = f.seek(0, 0)
endoffile = f.seek(0, 2)
bytes_size = 2
n_samples = int((endoffile-startoffile)/n_channels/bytes_size)
duration = n_samples/frequency
f.close()
with open(path, 'rb') as f:
data = np.fromfile(f, np.int16).reshape((n_samples, n_channels))[:,channel]
timestep = np.arange(0, len(data))/frequency
# check if lfp time stamps exist
lfp_ts_path = os.path.join(os.path.dirname(os.path.abspath(path)),'lfp_ts.npy')
if os.path.exists(lfp_ts_path):
timestep = np.load(lfp_ts_path).reshape(-1)
return data,timestep # nts.TsdFrame(timestep, data, time_units = 's')
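# Usage sketch (not part of the original module): wires loadXML and loadLFP
# together. The session folder, the .lfp file name and the channel index are
# placeholder values, not paths from the original recordings.
def _load_lfp_example(session_folder='path/to/session', lfp_file='session.lfp', channel=0):
    n_channels, fs, shank_to_channel = loadXML(session_folder)
    data, timestep = loadLFP(os.path.join(session_folder, lfp_file),
                             n_channels=n_channels,
                             channel=channel,
                             frequency=fs)
    return data, timestep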
def get_session_path(session):
f = h5py.File(session+'.mat','r')
return f['session_path'][()].tobytes()[::2].decode()
def load_position(session):
f = h5py.File(session+'.mat','r')
# load frames [ts x y a s]
frames = np.transpose(np.array(f['frames']))
return pd.DataFrame(frames,columns=['ts', 'x', 'y', 'hd', 'speed'])
def get_epochs(path):
f = h5py.File(path,'r')
ex_ep = []
for i in range(f['events'].shape[0]):
ex_ep.append(f['events'][i])
return ex_ep
def get_maze_size_cm(path):
f = h5py.File(path,'r')
maze_size_cm = []
for i in range(f['maze_size_cm'].shape[0]):
maze_size_cm.append(f['maze_size_cm'][i][0])
return maze_size_cm
def get_spikes(filename):
data = hdf5storage.loadmat(filename,variable_names=['Spikes'])
spike_times=data['Spikes']
spike_times=np.squeeze(spike_times)
for i in range(spike_times.shape[0]):
spike_times[i]=np.squeeze(spike_times[i])
return spike_times
def writeNeuroscopeEvents(path, ep, name):
f = open(path, 'w')
for i in range(len(ep)):
f.writelines(str(ep.as_units('ms').iloc[i]['start']) + " "+name+" start "+ str(1)+"\n")
#f.writelines(str(ep.as_units('ms').iloc[i]['peak']) + " "+name+" start "+ str(1)+"\n")
f.writelines(str(ep.as_units('ms').iloc[i]['end']) + " "+name+" end "+ str(1)+"\n")
f.close()
return
def fastrms(x,window=5):
window = np.ones(window)
power = x**2
rms = np.convolve(power,window,mode='same')
return np.sqrt(rms/sum(window))
def get_place_fields(ratemap,min_peak_rate=2,min_field_width=2,max_field_width=39,percent_threshold=0.2):
std_rates = np.std(ratemap)
locs,properties = find_peaks(fastrms(ratemap), height=min_peak_rate, width=min_field_width)
pks = properties['peak_heights']
exclude = []
for j in range(len(locs)-1):
if min(ratemap[locs[j]:locs[j+1]]) > ((pks[j] + pks[j+1]) / 2) * percent_threshold:
if pks[j] > pks[j+1]:
exclude.append(j+1)
elif pks[j] < pks[j+1]:
exclude.append(j)
    if any(ratemap[locs] < std_rates * .5):
        # collect the indices of peaks whose rate is below half the ratemap std
        exclude.extend(np.where(ratemap[locs] < std_rates * .5)[0].tolist())
    if exclude:
        pks = np.delete(pks, exclude)
        locs = np.delete(locs, exclude)
fields = []
for j in range(len(locs)):
Map_Field = (ratemap > pks[j] * percent_threshold)*1
start = locs[j]
stop = locs[j]
while (Map_Field[start] == 1) & (start > 0):
start -= 1
while (Map_Field[stop] == 1) & (stop < len(Map_Field)-1):
stop += 1
if ((stop - start) > min_field_width) & ((stop - start) < max_field_width):
com = start
while sum(ratemap[start:stop]) - sum(ratemap[start:com]) > sum(ratemap[start:com])/2:
com += 1
fields.append((start,stop,stop - start,pks[j],locs[j],com))
# add to data frames
    fields = pd.DataFrame(fields, columns=("start", "stop", "width", "peakFR", "peakLoc", "COM"))
    return fields
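# Usage sketch (not part of the original module): runs get_place_fields on a
# synthetic 1-D ratemap with a single Gaussian bump; the bin count, peak rate and
# field width below are arbitrary illustrative choices.
def _place_field_example():
    bins = np.arange(100)
    ratemap = 8.0 * np.exp(-(bins - 50) ** 2 / (2 * 5.0 ** 2))  # one ~8 Hz field centred on bin 50
    fields = get_place_fields(ratemap, min_peak_rate=2, min_field_width=2)
    return fields  # one row per detected field: start/stop/width/peakFR/peakLoc/COM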
import json
from abc import abstractmethod, ABC
from omegaconf import DictConfig
import numpy as np
import pandas as pd
from common.shape import (
Shape,
Circle,
Polygon,
ListWaypoint,
# ListLanePoint
Polyline
)
class MapComponent:
def __init__(
self,
config: DictConfig,
shape: Shape
):
self._config = config
self._shape = shape
self.data = self._get_data()
@abstractmethod
def _get_data(self):
"""
        This should return a pd.DataFrame where each row is a point of the
        polygon / list of waypoints, with columns like:
| id | type | x | y | status
Returns:
pd.DataFrame
"""
pass
class CrossWalk(MapComponent, ABC):
def __init__(
self,
config: DictConfig,
shape: Polygon
):
"""
Class contains a polygon of crosswalk
Args:
config (DictConfig): configuration
shape (Polygon): a polygon of crosswalk
"""
super().__init__(config, shape)
def _get_data(self):
# get x, y only
_points = self._shape.points[:, :2]
num_row = len(_points)
# id will be assigned later in MapHandler
_id = np.zeros((num_row, 1))
_type = np.array(["crosswalk"] * num_row).reshape((-1, 1))
# fill nan for status column
_status = np.empty((num_row, 1))
_status.fill(np.nan)
data = np.concatenate([_id, _type, _points, _status], axis=1)
        df = pd.DataFrame(data=data, columns=self._config.storage.static.columns)
        return df
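# Usage sketch (not part of the original module): the config below only mimics the
# ``| id | type | x | y | status`` column layout documented in MapComponent._get_data,
# and Polygon(points=...) is assumed to accept the corner array - both are
# illustrative stand-ins, not the project's real configuration.
def _crosswalk_example():
    from omegaconf import OmegaConf
    config = OmegaConf.create(
        {"storage": {"static": {"columns": ["id", "type", "x", "y", "status"]}}}
    )
    corners = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [2.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
    crosswalk = CrossWalk(config, Polygon(points=corners))
    return crosswalk.data  # one row per polygon corner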
import os
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from mavedbconvert import empiric, constants
from tests import ProgramTestCase
class TestEmpiricInit(ProgramTestCase):
def setUp(self):
super().setUp()
self.path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
def test_offset_inframe(self):
empiric.Empiric(src=self.path, wt_sequence="ATC", offset=3)
def test_error_offset_not_inframe(self):
with self.assertRaises(ValueError):
empiric.Empiric(src=self.path, wt_sequence="ATC", offset=1)
def test_error_noncoding(self):
with self.assertRaises(ValueError):
empiric.Empiric(src=self.path, wt_sequence="ATC", is_coding=False)
class TestInferProEvent(unittest.TestCase):
def test_infers_equal_event(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="V", wt_aa="v", codon_pos=0),
"p.Val1=",
)
def test_infers_sub_event_event(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="V", wt_aa="F", codon_pos=0),
"p.Phe1Val",
)
def test_converts_triple_q_to_Xaa(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="?", wt_aa="V", codon_pos=0),
"p.Val1Xaa",
)
class TestInferNTEvent(unittest.TestCase):
def test_infers_equal_event(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="aaa", mut_codon="AAA", codon_pos=0),
"c.[1=;2=;3=]",
)
def test_infers_sub_event_event(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="ATC", mut_codon="GTA", codon_pos=0),
"c.[1A>G;2=;3C>A]",
)
def test_adds_codon_pos_multiplied_by_3_to_position(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="ATC", mut_codon="GTA", codon_pos=1),
"c.[4A>G;5=;6C>A]",
)
class TestEmpiric(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input, wt_sequence="AAA", one_based=False
)
def test_error_missing_amino_acid(self):
for nan in constants.extra_na:
df = pd.DataFrame({"Position": [0], "Amino Acid": [nan], "row_num": [0]})
self.empiric.validate_columns(df)
with self.assertRaises(ValueError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_value_error_codon_doesnt_match_aa_column(self):
with self.assertRaises(ValueError):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["V"], "Codon": ["AAT"], "row_num": [0]}
)
self.empiric.validate_columns(df)
self.empiric.parse_row(row=df.iloc[0, :])
def test_error_infer_nt_true_but_missing_codon_value(self):
for nan in constants.extra_na:
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "row_num": [0], "Codon": [nan]}
)
self.empiric.validate_columns(df)
with self.assertRaises(ValueError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_index_error_negative_position(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["K"], "row_num": [0], "Codon": ["AAA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = True
with self.assertRaises(IndexError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_index_error_out_of_codon_bounds(self):
df = pd.DataFrame(
{"Position": [56], "Amino Acid": ["K"], "row_num": [0], "Codon": ["AAA"]}
)
self.empiric.validate_columns(df)
with self.assertRaises(IndexError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_amino_acid_column_is_case_insensitive(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["v"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_infers_hgvs_pro_event_from_one_based_position(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = True
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_infers_hgvs_pro_event_from_zero_based_position(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.wt_sequence = "GTAAAA"
self.empiric.one_based = False
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys2Val")
def test_protein_output_is_singular_when_inferring_nt(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
hgvs_nt, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[1A>G;2A>T;3=]")
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_hgvs_nt_is_none_when_codon_is_not_in_axes(self):
df = pd.DataFrame({"Position": [0], "Amino Acid": ["V"], "row_num": [0]})
self.empiric.validate_columns(df)
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertIsNone(hgvs_nt)
def test_correctly_infers_hgvs_nt_positions_when_zero_based(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = False
self.empiric.wt_sequence = "GGGAAT"
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[4A>G;5A>T;6T>A]")
def test_correctly_infers_hgvs_nt_positions_when_one_based(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.empiric.one_based = True
self.empiric.wt_sequence = "GTAAAA"
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[1G>A;2T>A;3A>T]")
class TestEmpiricValidateColumns(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input, wt_sequence="AAA", one_based=False
)
def test_error_cannot_find_case_insensitive_aa_column(self):
df = pd.DataFrame({"Position": [1], "aa": ["N"], "Codon": ["AAT"]})
with self.assertRaises(ValueError):
self.empiric.validate_columns(df)
def test_error_cannot_find_case_insensitive_position_column(self):
df = pd.DataFrame({"pos": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
with self.assertRaises(ValueError):
self.empiric.validate_columns(df)
def test_sets_codon_column_as_none_if_not_present(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.codon_column, None)
def test_sets_codon_column_if_present(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.codon_column, "Codon")
def test_sets_position_column(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.position_column, "Position")
def test_sets_aa_column(self):
df = pd.DataFrame({"Position": [1], "amino acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.aa_column, "amino acid")
class TestEmpiricParseScoresInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input,
wt_sequence="AAA",
one_based=False,
input_type="scores",
score_column="A",
)
def test_deletes_position_amino_acid_codon_row_num_columns(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "Codon": ["AAT"], "A": [1.2]}
)
result = self.empiric.parse_input(df)
self.assertNotIn("Position", result.columns)
self.assertNotIn("Amino Acid", result.columns)
self.assertNotIn("Codon", result.columns)
self.assertNotIn("row_num", result.columns)
def test_keeps_additional_non_score_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertIn("B", result.columns)
def test_renames_score_column_to_score_and_drops_original(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertListEqual(list(df["A"]), list(result["score"]))
self.assertIn("B", result.columns)
self.assertNotIn("A", result.columns)
def test_sets_hgvs_pro_column(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(result[constants.pro_variant_col].values[0], "p.Lys1Asn")
def test_correctly_infers_hgvs_nt_column_when_codon_column_present(self):
df = pd.DataFrame(
{
"Position": [1],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
self.empiric.one_based = False
self.empiric.wt_sequence = "GGGAAA"
result = self.empiric.parse_input(df)
self.assertEqual(result[constants.nt_variant_col].values[0], "c.[4=;5=;6A>T]")
def test_orders_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(list(result.columns).index(constants.nt_variant_col), 0)
self.assertEqual(list(result.columns).index(constants.pro_variant_col), 1)
self.assertEqual(list(result.columns).index(constants.mavedb_score_column), 2)
def test_removes_null_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"B": [None],
"A": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertNotIn("B", result.columns)
def test_drops_nt_when_codon_column_is_not_provided(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "A": [1.2], "B": [2.4]}
)
result = self.empiric.parse_input(df)
self.assertNotIn(constants.nt_variant_col, result.columns)
def test_drops_non_numeric_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": ["a"],
}
)
result = self.empiric.parse_input(df)
self.assertNotIn("B", result.columns)
def test_keeps_int_type_as_int(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "Codon": ["AAT"], "A": [1]}
)
result = self.empiric.parse_input(df)
self.assertTrue(
np.issubdtype(
result[constants.mavedb_score_column].values[0], np.signedinteger
)
)
class TestEmpiricParseCountsInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input,
wt_sequence="AAA",
one_based=False,
input_type="counts",
score_column="A",
)
def test_orders_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(list(result.columns).index(constants.nt_variant_col), 0)
self.assertEqual(list(result.columns).index(constants.pro_variant_col), 1)
self.assertEqual(list(result.columns).index("A"), 2)
self.assertEqual(list(result.columns).index("B"), 3)
class TestEmpiricLoadInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.excel_path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.excel_header_footer_path = os.path.join(
self.data_dir, "empiric", "empiric_header_footer.xlsx"
)
self.csv_path = os.path.join(self.data_dir, "empiric", "tmp.csv")
self.tsv_path = os.path.join(self.data_dir, "empiric", "tmp.tsv")
self.excel_multisheet_path = os.path.join(
self.data_dir, "empiric", "empiric_multisheet.xlsx"
)
def test_extra_na_load_as_nan(self):
for value in constants.extra_na:
df = pd.read_excel(self.excel_path, engine="openpyxl")
df["A"] = [value] * len(df)
df.to_csv(self.csv_path, index=False)
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
expected = pd.Series([np.NaN] * len(df), index=df.index, name="A")
assert_series_equal(result["A"], expected)
def test_loads_first_sheet_by_default(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="score",
input_type=constants.score_type,
)
result = p.load_input_file()
expected = pd.read_excel(
self.excel_multisheet_path, na_values=constants.extra_na, engine="openpyxl"
)
assert_frame_equal(result, expected)
def test_loads_correct_sheet(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
sheet_name="Sheet3",
)
result = p.load_input_file()
expected = pd.read_excel(
self.excel_multisheet_path,
na_values=constants.extra_na,
sheet_name="Sheet3",
engine="openpyxl",
)
assert_frame_equal(result, expected)
def test_error_missing_sheet(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
sheet_name="BadSheet",
)
with self.assertRaises(KeyError):
p.load_input_file()
def test_handles_csv(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df.to_csv(self.csv_path, index=False, sep=",")
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
assert_frame_equal(result, df)
def test_loads_with_skipped_rows(self):
p = empiric.Empiric(
src=self.excel_header_footer_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
skip_header_rows=2,
skip_footer_rows=2,
)
result = p.load_input_file()
df = pd.read_excel(self.excel_path, engine="openpyxl")
assert_frame_equal(result, df)
def test_handles_tsv(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df.to_csv(self.tsv_path, index=False, sep="\t")
e = empiric.Empiric(
src=self.tsv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
assert_frame_equal(result, df)
def test_error_position_not_in_columns(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df = df.drop(columns=["Position"])
df.to_csv(self.csv_path, index=False, sep="\t")
with self.assertRaises(ValueError):
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
e.load_input_file()
def test_error_amino_acid_not_in_columns(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df = df.drop(columns=["Amino Acid"])
df.to_csv(self.csv_path, index=False, sep="\t")
with self.assertRaises(ValueError):
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
e.load_input_file()
def test_not_scores_column_but_input_type_is_scores(self):
with self.assertRaises(ValueError):
empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column=None,
input_type=constants.score_type,
one_based=False,
)
def test_applies_offset_to_position_column(self):
e = empiric.Empiric(
src=self.excel_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
offset=-9,
)
result = e.load_input_file()
self.assertListEqual(list(result[e.position_column]), [3, 4, 5])
class TestEmpiricConvert(ProgramTestCase):
def setUp(self):
super().setUp()
self.excel_path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.expected = os.path.join(self.data_dir, "empiric", "empiric_expected.csv")
self.empiric = empiric.Empiric(
src=self.excel_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
def test_saves_to_dst(self):
self.empiric.convert()
self.assertTrue(os.path.isfile(self.empiric.output_file))
def test_integration(self):
self.empiric = empiric.Empiric(
src=self.excel_path,
wt_sequence="TCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
self.empiric.convert()
assert_frame_equal(
pd.read_csv(self.empiric.output_file, delimiter=","),
            pd.read_csv(self.expected, delimiter=","),
        )
import glob
import json
from pathlib import Path
import pandas as pd
import numpy as np
import os
from itertools import groupby
CAP = 1500
SEED = 1234
np.random.seed(SEED)
class dataset:
def write_unique_ids(self, out_file):
"""
Write the unique IDs to a file, but add a self.prefix to each element of the array.
For example, if self.unique_ids is
['image_1.jpg', 'image_2.jpg']
        then if self.prefix is './folder/', then out_file would be written as
./folder/image_1.jpg
./folder/image_2.jpg
"""
with open(out_file,'w') as f:
f.writelines([self.prefix+x+'\n' for x in self.unique_ids])
return
def read_unique_ids(self, in_file, prefix=None):
"""
Read the unique IDs from in_file, but remove a self.prefix from each element of the array.
For example, if the in_file is
./folder/image_1.jpg
./folder/image_2.jpg
and the self.prefix is './folder/', then self.unique_ids would be written as
['image_1.jpg', 'image_2.jpg']
"""
if prefix is None:
prefix = self.prefix
with open(in_file) as f:
self.unique_ids = [x.strip().replace(prefix, '') for x in f]
return
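# Usage sketch (not part of the original module): round-trips the prefix handling
# described in the docstrings above. The IDs, prefix and file name are made up.
def _dataset_io_example(tmp_file='unique_ids_demo.txt'):
    ds = dataset()
    ds.prefix = './folder/'
    ds.unique_ids = ['image_1.jpg', 'image_2.jpg']
    ds.write_unique_ids(tmp_file)   # writes ./folder/image_1.jpg and ./folder/image_2.jpg
    ds.read_unique_ids(tmp_file)    # strips the prefix back off on load
    return ds.unique_ids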
class adience_dataset(dataset):
def __init__(self, metadata_folder='./'):
"""
Create the Adience Dataset class.
        Usually run as:
            adi = adience_dataset(metadata_folder)
            adi.select_unique_ids()
            adi.write_unique_ids('adience_images.txt')
        Or, if the unique_ids have already been created:
            adi = adience_dataset(metadata_folder)
            adi.read_unique_ids('adience_images.txt')
"""
self.metadata = self.load_metadata(metadata_folder)
self.prefix = 'data/adience/faces/'
return
def load_metadata(self, metadata_folder):
def adience_resolve_class_label(age):
"""
Given an age, what is the age group?
"""
if age == '(0, 2)' or age == '2':
age_id = 0
elif age == '(4, 6)' or age == '3':
age_id = 1
elif age == '(8, 12)' or age == '(8, 23)' or age == '13':
age_id = 2
elif age == '(15, 20)' or age == '22':
age_id = 3
elif age == '(25, 32)' or age == '(27, 32)' or age in ['23', '29', '34', '35']:
age_id = 4
elif age == '(38, 42)' or age == '(38, 43)' or age == '(38, 48)' or age in ['36', '42', '45']:
age_id = 5
elif age == '(48, 53)' or age in ['46', '55']:
age_id = 6
elif age == '(60, 100)' or age in ['57', '58']:
age_id = 7
else:
raise ValueError("Not sure how to handle this age: {}".format(age))
return age_id
if metadata_folder[-1] == '/':
metadata_folder = metadata_folder[:-1]
fold_0 = pd.read_csv(f'{metadata_folder}/fold_0_data.txt', sep='\t')
fold_1 = pd.read_csv(f'{metadata_folder}/fold_1_data.txt', sep='\t')
fold_2 = pd.read_csv(f'{metadata_folder}/fold_2_data.txt', sep='\t')
fold_3 = pd.read_csv(f'{metadata_folder}/fold_3_data.txt', sep='\t')
fold_4 = pd.read_csv(f'{metadata_folder}/fold_4_data.txt', sep='\t')
# get only those data that have an age and gender is m or f
fold_0 = fold_0[np.logical_and(fold_0['age'] != 'None',
np.logical_or(fold_0['gender'] == 'm', fold_0['gender'] == 'f'))]
fold_1 = fold_1[np.logical_and(fold_1['age'] != 'None',
np.logical_or(fold_1['gender'] == 'm', fold_1['gender'] == 'f'))]
fold_2 = fold_2[np.logical_and(fold_2['age'] != 'None',
np.logical_or(fold_2['gender'] == 'm', fold_2['gender'] == 'f'))]
fold_3 = fold_3[np.logical_and(fold_3['age'] != 'None',
np.logical_or(fold_3['gender'] == 'm', fold_3['gender'] == 'f'))]
fold_4 = fold_4[np.logical_and(fold_4['age'] != 'None',
np.logical_or(fold_4['gender'] == 'm', fold_4['gender'] == 'f'))]
        adience = pd.concat([fold_0, fold_1, fold_2, fold_3, fold_4])
        return adience
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
        - Run transform function twice, first attempting ``table_data`` and
          ``column``, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
        - Run reverse transform function twice, first attempting ``table_data`` and
          ``column``, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
        - Run is valid function twice, first attempting ``table_data`` and
          ``column``, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
        It is expected to create a new Constraint instance and receive the names of
        the columns that need to produce unique combinations.
        Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
        assert low == ['a']
        assert high == 3
        assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
        assert low == 3
        assert high == ['b']
        assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == [3]
        assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == ['b', 'c']
        assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
        - Raise an error because ``drop`` is the same as ``scalar``.
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
        - Raise an error because ``drop`` is the same as ``scalar``.
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
        Make sure the method succeeds when ``drop`` is different from
        ``scalar``.
        Input:
        - low = 'a'
        - high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
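    # Editorial sketch, not part of the original suite: the round trip below ties
    # together behaviours exercised individually by the tests that follow
    # (``_fit`` learning the diff column, ``_transform`` encoding the distance,
    # ``reverse_transform`` restoring valid rows unchanged). It assumes the
    # private helpers behave exactly as those tests describe.
    def test_fit_transform_round_trip_sketch(self):
        """Fit, transform and reverse a fully valid table; expect it unchanged."""
        # Setup
        instance = GreaterThan(low='a', high='b', strict=True)
        table_data = pd.DataFrame({
            'a': [1, 2, 3],
            'b': [4, 5, 6],
        })
        # Run
        instance._fit(table_data)
        transformed = instance._transform(table_data)
        out = instance.reverse_transform(transformed)
        # Assert
        pd.testing.assert_frame_equal(out, table_data)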
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
        is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
        - instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
        assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
        assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
        assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
        assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the given column names with a '#' token
        appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the given column names with a '#' token
        appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned name should be the two column names joined by a '#'
        token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the column pairs joined by a '#'
        token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the column pairs joined by a '#'
        token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
        to ``instance._high`` if ``instance._drop`` is ``high``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
        to ``instance._low`` if ``instance._drop`` is ``low``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
        by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
        - ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
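    # Note (editorial): the two tests above also illustrate the naming convention
    # for diff columns: with two constrained columns the name is the high column
    # joined to the low column by a '#' token (``'b#a'``), while with a scalar on
    # one side each constrained column simply gets a trailing '#' (``'a#'``).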
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
        if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
        ``dtype`` of the ``low`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
        ``dtype`` of the ``high`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
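    # Editorial sketch, not part of the original suite: per the ``__init__`` tests
    # above, ``strict`` only switches the comparison operator, so a row where the
    # two columns are equal flips between valid and invalid.
    def test_is_valid_equal_values_sketch(self):
        """A row with ``low == high`` is valid only when ``strict`` is False."""
        # Setup
        table_data = pd.DataFrame({'a': [2], 'b': [2]})
        # Run / Assert
        assert GreaterThan(low='a', high='b', strict=False).is_valid(table_data)[0]
        assert not GreaterThan(low='a', high='b', strict=True).is_valid(table_data)[0]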
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
        the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
        the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
        the values in those columns should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
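    # Note (editorial): the constant ``np.log(4)`` above is ``np.log((4 - 1) + 1)``,
    # i.e. the logarithm of the high/low distance plus one. The sketch below, not
    # part of the original suite, checks the same encoding for a distance of 9.
    def test__transform_diff_value_sketch(self):
        """A constant distance of 9 should be encoded as ``np.log(10)``."""
        # Setup
        instance = GreaterThan(low='a', high='b', strict=True)
        instance._diff_columns = ['a#b']
        # Run
        table_data = pd.DataFrame({'a': [1], 'b': [10]})
        out = instance._transform(table_data)
        # Assert
        np.testing.assert_allclose(out['a#b'], [np.log(10)])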
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
        of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
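    # Note (editorial): ``1_000_000_001`` above is one second expressed in
    # nanoseconds plus one, because timedeltas are converted to their nanosecond
    # representation before the ``+ 1`` and the logarithm are applied. The sketch
    # below, not part of the original suite, spells that constant out.
    def test__transform_datetime_constant_sketch(self):
        """One second in nanoseconds plus one equals 1_000_000_001."""
        assert pd.Timedelta('1s').value + 1 == 1_000_000_001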
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the logarithm
        of the distance between the scalar and each constrained column + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the logarithm
        of the distance between the scalar and each constrained column + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the logarithm
        of the distance between the high column and each low column + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
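    # Note (editorial): the reconstruction above inverts the transform:
    # ``round(np.exp(np.log(4)) - 1) == 3``, so the dropped ``b`` column comes back
    # as ``a + 3``. The sketch below, not part of the original suite, spells out
    # that arithmetic.
    def test_reverse_transform_inverse_arithmetic_sketch(self):
        """Recovering the distance from the diff column gives back 3."""
        diff = np.log(4)
        assert int(round(np.exp(diff) - 1)) == 3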
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
        - Table with diff columns that contain the constant np.log(1).
        Output:
        - Same table with the diff columns dropped and the remaining values
        unchanged.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
        - add the low value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
        - Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
        - Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data with desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data with desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data not with the desired decimal places (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is an int scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
        high = pd.to_datetime('2021-01-01')
import pandas as pd
import numpy as np
from random import sample
from xgboost import XGBRegressor
from random import choices,seed
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats import t
import os
os.chdir("c://users/jliv/downloads/")
# The UCI auto-mpg.data file is whitespace-delimited rather than comma-separated.
dat = pd.read_csv("auto-mpg.data", header=None, delim_whitespace=True)
"""
1. mpg: continuous
2. cylinders: multi-valued discrete
3. displacement: continuous
4. horsepower: continuous
5. weight: continuous
6. acceleration: continuous
7. model year: multi-valued discrete
8. origin: multi-valued discrete
9. car name: string (unique for each instance)
"""
pd.set_option("display.max_columns",19)
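# Minimal sketch of a next step, assuming the 9-column layout documented above;
# the identifiers below are illustrative names chosen to mirror the docstring.
dat.columns = ["mpg", "cylinders", "displacement", "horsepower", "weight",
               "acceleration", "model_year", "origin", "car_name"]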
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 5)
browser.get('https://www.beyondblue.org.au/get-support/online-forums/')
forum_name_list = ['Depression', 'Suicidal thoughts and self-harm']
forum_xpath_list = ['//*[@id="MainContentPlaceholder_C006_forumsFrontendList_ctl00_ctl00_pnlMain"]/table/tbody[2]/tr[2]/td[1]/a', '//*[@id="MainContentPlaceholder_C006_forumsFrontendList_ctl00_ctl00_pnlMain"]/table/tbody[2]/tr[4]/td[1]/a']
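# The XPaths are paired positionally with forum_name_list (forum_count indexes both),
# so the two lists must stay in the same order.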
forum_count = 0
df = pd.DataFrame()
for xpath in forum_xpath_list:
forum_name = forum_name_list[forum_count]
forum_button = wait.until(EC.element_to_be_clickable((By.XPATH, xpath))).click()
forum_error = False
forum_page_count = 1
while forum_error is False:
try:
thread_location_list = wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, 'sfforumThreadTitle')))
thread_names_list = []
forum_page_url = browser.current_url
for thread_location in thread_location_list:
thread_names_list.append(thread_location.text)
thread_count = 0
for thread_name in thread_names_list:
try:
thread_button = wait.until(EC.element_to_be_clickable((By.LINK_TEXT, thread_name))).click()
thread_error = False
thread_page_count = 1
while thread_error is False:
try:
content_list = []
author_list = []
content_location_list = wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, 'postAndSig')))
for content_location in content_location_list:
content_list.append(content_location.text)
if(thread_page_count == 1):
original_comment = content_list[0]
author_location_list = wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, 'sfforumUser')))
for author_location in author_location_list:
author_list.append(author_location.text)
if(thread_page_count == 1):
original_author = author_list[0]
original_content_list = [original_comment] * len(content_list)
original_author_list = [original_author] * len(author_list)
forum_list = [forum_name] * len(author_list)
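                            # Repeat the thread-level fields so every scraped comment row
                            # carries its forum, original author and original post.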
dictionary = {'Forum':forum_list, 'Original Author':original_author_list, 'Original Post':original_content_list, 'Comment':content_list, 'Comment Author':author_list}
                            page_df = pd.DataFrame(dictionary)
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
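        # 'wrapper' lets NaNs propagate (used for the skipna=False calls below),
        # while 'skipna_wrapper' drops them to mirror the default skipna=True path.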
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame of booleans with some entries set to NaN
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
        result = frame.cov(min_periods=len(frame) - 8)
        expected = frame.cov()
        expected.loc['A', 'B'] = np.nan
        expected.loc['B', 'A'] = np.nan
        tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
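        # Appending column 0 again gives df2 duplicate column labels, which is why
        # the expected index below is [0, 0, 1, 2].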
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
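        # Each column is one random value repeated 1000 times, so its variance is
        # exactly zero and must never come out negative from rounding error.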
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
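        # diffs['A'] starts near five minutes and grows by one day per row;
        # diffs['B'] is a constant -1 day, which drives the min/max checks below.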
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
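    # count() returns the number of non-NA cells per column (axis=0) or per
    # row (axis=1); on an empty frame this is an empty / all-zero Series, e.g.
    #   pd.DataFrame({'A': [1.0, np.nan]}).count() -> A    1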
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
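    # The identity being verified: with fill_method='pad', pct_change along an
    # axis is equivalent to forward-filling and comparing against the shifted
    # forward-fill, i.e. df.ffill() / df.ffill().shift() - 1 for axis=0.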
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
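    # idxmin/idxmax return the index *label* of the extreme value, not its
    # position, e.g.
    #   pd.DataFrame({'A': [2.0, 1.0]}, index=['x', 'y']).idxmin() -> A    y
    # With skipna=False, a NaN along the reduced axis propagates to the result.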
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
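    # axis=None reduces over both axes to a single scalar (a NumPy bool), so
    # .item() yields a plain Python bool, e.g.
    #   pd.DataFrame({'A': [True, False]}).any(axis=None).item() -> True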
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
        expected = Series(dtype=bool)  # builtin bool; np.bool alias is removed in newer NumPy
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
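    # bool_only=True keeps only columns of boolean dtype; numeric and object
    # columns (like 'col1'..'col3' above) are dropped, so a frame with no bool
    # columns reduces to an empty Series.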
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
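    # isin with a list/set is plain element-wise membership, the same test for
    # every column, e.g. df.isin(['a', 'b']) is True wherever a cell equals
    # 'a' or 'b'.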
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
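    # With a dict, membership is evaluated per column key: only columns named
    # in the dict are compared against their listed values; everything else
    # comes back False.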
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
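    # With another DataFrame, isin aligns on both index and columns before
    # comparing element-wise; labels that do not overlap are False, which is
    # why renaming 'B' to 'C' above turns the whole 'B' column False.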
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
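    # With a Series, values are matched by index label rather than by
    # position: a cell is True only when it equals the Series value at the
    # same row label (hence row 'd' above, where both columns equal 4).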
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
        expected = df2.values.astype(bool)  # builtin bool; np.bool alias is removed in newer NumPy
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
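        # Keys that do not match a column ('col3') are silently ignored, and
        # columns without an entry ('col1') are left unrounded.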
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
        nan_round_Series = Series({'col1': np.nan, 'col2': 1})
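        # NaN should be rejected like any other non-integer decimals value
        # (sketch of the truncated tail of this test, following the pattern
        # of the cases above)
        with pytest.raises(TypeError):
            df.round(nan_round_Series)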